diff --git a/.github/scripts/filter-matrix.py b/.github/scripts/filter-matrix.py index 14bcb7028d..ee3cfd491a 100644 --- a/.github/scripts/filter-matrix.py +++ b/.github/scripts/filter-matrix.py @@ -8,7 +8,7 @@ # currently we don't support python 3.13t due to tensorrt does not support 3.13t disabled_python_versions: List[str] = ["3.13t", "3.14", "3.14t"] -disabled_cuda_versions: List[str] = ["cu130"] +disabled_cuda_versions: List[str] = [] # jetpack 6.2 only officially supports python 3.10 and cu126 jetpack_python_versions: List[str] = ["3.10"] diff --git a/.github/scripts/generate-release-matrix.py b/.github/scripts/generate-release-matrix.py index 9f740c2f4f..b5926b2f2e 100644 --- a/.github/scripts/generate-release-matrix.py +++ b/.github/scripts/generate-release-matrix.py @@ -5,8 +5,8 @@ import sys RELEASE_CUDA_VERSION = { - "wheel": ["cu129"], - "tarball": ["cu129"], + "wheel": ["cu130"], + "tarball": ["cu130"], } RELEASE_PYTHON_VERSION = { "wheel": ["3.10", "3.11", "3.12", "3.13"], @@ -15,7 +15,7 @@ sbsa_container_image: str = "quay.io/pypa/manylinux_2_34_aarch64" CXX11_TARBALL_CONTAINER_IMAGE = { - "cu129": "pytorch/libtorch-cxx11-builder:cuda12.9-main", + "cu130": "pytorch/libtorch-cxx11-builder:cuda13.0-main", } diff --git a/.github/scripts/generate-tensorrt-test-matrix.py b/.github/scripts/generate-tensorrt-test-matrix.py index 3c569419d6..fba74d0514 100644 --- a/.github/scripts/generate-tensorrt-test-matrix.py +++ b/.github/scripts/generate-tensorrt-test-matrix.py @@ -11,9 +11,9 @@ # channel: nightly if the future tensorRT version test workflow is triggered from the main branch or your personal branch # channel: test if the future tensorRT version test workflow is triggered from the release branch(release/2.5 etc....) 
CUDA_VERSIONS_DICT = { - "nightly": ["cu129"], - "test": ["cu126", "cu128", "cu129"], - "release": ["cu126", "cu128", "cu129"], + "nightly": ["cu130"], + "test": ["cu126", "cu128", "cu130"], + "release": ["cu126", "cu128", "cu130"], } # please update the python version you want to test with the future tensorRT version here @@ -21,8 +21,8 @@ # channel: test if the future tensorRT version test workflow is triggered from the release branch(release/2.5 etc....) PYTHON_VERSIONS_DICT = { "nightly": ["3.11"], - "test": ["3.9", "3.10", "3.11", "3.12", "3.13"], - "release": ["3.9", "3.10", "3.11", "3.12", "3.13"], + "test": ["3.10", "3.11", "3.12", "3.13"], + "release": ["3.10", "3.11", "3.12", "3.13"], } # please update the future tensorRT version you want to test here diff --git a/.github/workflows/build_windows.yml b/.github/workflows/build_windows.yml index 6fbd343fdb..c2d4b0b20b 100644 --- a/.github/workflows/build_windows.yml +++ b/.github/workflows/build_windows.yml @@ -241,6 +241,7 @@ jobs: env: ENV_SCRIPT: ${{ inputs.env-script }} run: | + set -x source "${BUILD_ENV_FILE}" if [[ -z "${ENV_SCRIPT}" ]]; then ${CONDA_RUN} python setup.py clean diff --git a/.github/workflows/docgen.yml b/.github/workflows/docgen.yml index a943efe302..14ea428dee 100644 --- a/.github/workflows/docgen.yml +++ b/.github/workflows/docgen.yml @@ -14,12 +14,12 @@ jobs: if: ${{ ! 
contains(github.actor, 'pytorchbot') }} environment: pytorchbot-env container: - image: docker.io/pytorch/manylinux2_28-builder:cuda12.9 + image: docker.io/pytorch/manylinux2_28-builder:cuda13.0 options: --gpus all env: - CUDA_HOME: /usr/local/cuda-12.9 - VERSION_SUFFIX: cu129 - CU_VERSION: cu129 + CUDA_HOME: /usr/local/cuda-13.0 + VERSION_SUFFIX: cu130 + CU_VERSION: cu130 CHANNEL: nightly CI_BUILD: 1 steps: @@ -35,14 +35,14 @@ jobs: - name: Install base deps run: | python3 -m pip install pip --upgrade - python3 -m pip install pyyaml numpy torch --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu129 + python3 -m pip install pyyaml numpy torch --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu130 ./packaging/pre_build_script.sh - name: Get HEAD SHA id: vars run: echo "sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - name: Build Python Package run: | - python3 -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu129 + python3 -m pip install --pre . 
--extra-index-url https://download.pytorch.org/whl/nightly/cu130 - name: Generate New Docs run: | cd docsrc diff --git a/MODULE.bazel b/MODULE.bazel index fcf95959a8..34a2a28f60 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -36,7 +36,7 @@ new_local_repository( new_local_repository( name = "cuda", build_file = "@//third_party/cuda:BUILD", - path = "/usr/local/cuda-12.9/", + path = "/usr/local/cuda-13.0/", ) # for Jetson @@ -65,7 +65,7 @@ http_archive( name = "libtorch", build_file = "@//third_party/libtorch:BUILD", strip_prefix = "libtorch", - urls = ["https://download.pytorch.org/libtorch/nightly/cu129/libtorch-shared-with-deps-latest.zip"], + urls = ["https://download.pytorch.org/libtorch/nightly/cu130/libtorch-shared-with-deps-latest.zip"], ) # in aarch64 platform you can get libtorch via either local or wheel file @@ -83,7 +83,7 @@ http_archive( name = "libtorch_win", build_file = "@//third_party/libtorch:BUILD", strip_prefix = "libtorch", - urls = ["https://download.pytorch.org/libtorch/nightly/cu129/libtorch-win-shared-with-deps-latest.zip"], + urls = ["https://download.pytorch.org/libtorch/nightly/cu130/libtorch-win-shared-with-deps-latest.zip"], ) http_archive( diff --git a/README.md b/README.md index 896bcccb37..081683cab9 100644 --- a/README.md +++ b/README.md @@ -5,9 +5,9 @@ Torch-TensorRT

Easily achieve the best inference performance for any PyTorch model on the NVIDIA platform.

[![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/) -[![pytorch](https://img.shields.io/badge/PyTorch-2.8-green)](https://download.pytorch.org/whl/nightly/cu128) -[![cuda](https://img.shields.io/badge/CUDA-12.8-green)](https://developer.nvidia.com/cuda-downloads) -[![trt](https://img.shields.io/badge/TensorRT-10.13.2.6-green)](https://github.com/nvidia/tensorrt-llm) +[![pytorch](https://img.shields.io/badge/PyTorch-2.9-green)](https://download.pytorch.org/whl/nightly/cu130) +[![cuda](https://img.shields.io/badge/CUDA-13.0-green)](https://developer.nvidia.com/cuda-downloads) +[![trt](https://img.shields.io/badge/TensorRT-10.13.2.6-green)](https://github.com/nvidia/tensorrt) [![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE) [![Linux x86-64 Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-x86_64.yml) [![Linux SBSA Nightly Wheels](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml/badge.svg?branch=nightly)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux-aarch64.yml) @@ -27,7 +27,7 @@ pip install torch-tensorrt Nightly versions of Torch-TensorRT are published on the PyTorch package index ```bash -pip install --pre torch-tensorrt --index-url https://download.pytorch.org/whl/nightly/cu128 +pip install --pre torch-tensorrt --index-url https://download.pytorch.org/whl/nightly/cu130 ``` Torch-TensorRT is also distributed in the ready-to-run [NVIDIA NGC PyTorch Container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) which has all dependencies with the proper versions and example notebooks included. 
diff --git a/docker/Dockerfile b/docker/Dockerfile index b218211e38..2138c94100 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -2,9 +2,9 @@ # Base image starts with CUDA #TODO: cuda version -ARG BASE_IMG=nvidia/cuda:12.9.0-devel-ubuntu22.04 +ARG BASE_IMG=nvidia/cuda:13.0.0-devel-ubuntu22.04 FROM ${BASE_IMG} as base -ENV BASE_IMG=nvidia/cuda:12.9.0-devel-ubuntu22.04 +ENV BASE_IMG=nvidia/cuda:13.0.0-devel-ubuntu22.04 ARG TENSORRT_VERSION ENV TENSORRT_VERSION=${TENSORRT_VERSION} diff --git a/docker/dist-build.sh b/docker/dist-build.sh index 7790c05f82..7ade4f10fd 100755 --- a/docker/dist-build.sh +++ b/docker/dist-build.sh @@ -4,7 +4,7 @@ set -x TOP_DIR=$(cd $(dirname $0); pwd)/.. -BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu129 -w dist" +BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu130 -w dist" # TensorRT restricts our pip version cd ${TOP_DIR} \ diff --git a/docsrc/getting_started/installation.rst b/docsrc/getting_started/installation.rst index 700d7ee74d..ceba409fec 100644 --- a/docsrc/getting_started/installation.rst +++ b/docsrc/getting_started/installation.rst @@ -46,7 +46,7 @@ Torch-TensorRT distributed nightlies targeting the PyTorch nightly. These can be .. code-block:: sh - python -m pip install --pre torch torch-tensorrt tensorrt --extra-index-url https://download.pytorch.org/whl/nightly/cu128 + python -m pip install --pre torch torch-tensorrt tensorrt --extra-index-url https://download.pytorch.org/whl/nightly/cu130 @@ -131,7 +131,7 @@ Once the WORKSPACE has been configured properly, all that is required to build t .. code-block:: sh - python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128 + python -m pip install --pre . 
--extra-index-url https://download.pytorch.org/whl/nightly/cu130 If you use the ``uv`` (`https://docs.astral.sh/uv/ `_) tool to manage python and your projects, the command is slightly simpler @@ -146,7 +146,7 @@ To build the wheel file .. code-block:: sh - python -m pip wheel --no-deps --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128 -w dist + python -m pip wheel --no-deps --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130 -w dist Additional Build Options ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -164,7 +164,7 @@ which has implications for features like serialization. .. code-block:: sh - PYTHON_ONLY=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128 + PYTHON_ONLY=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130 No TorchScript Frontend @@ -175,7 +175,7 @@ of C++ code that is no longer necessary for most users. Therefore you can exclud .. code-block:: sh - NO_TORCHSCRIPT=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu128 + NO_TORCHSCRIPT=1 python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu130 Building the C++ Library Standalone (TorchScript Only) @@ -245,7 +245,7 @@ Build steps * Open the app "x64 Native Tools Command Prompt for VS 2022" - note that Admin privileges may be necessary * Ensure Bazelisk (Bazel launcher) is installed on your machine and available from the command line. Package installers such as Chocolatey can be used to install Bazelisk -* Install latest version of Torch (i.e. with ``pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu128``) +* Install latest version of Torch (i.e. 
with ``pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu130``) * Clone the Torch-TensorRT repository and navigate to its root directory * Run ``pip install ninja wheel setuptools`` * Run ``pip install --pre -r py/requirements.txt`` diff --git a/py/requirements.txt b/py/requirements.txt index a34a458938..0d7690bfaf 100644 --- a/py/requirements.txt +++ b/py/requirements.txt @@ -1,8 +1,9 @@ numpy packaging pybind11==2.6.2 ---extra-index-url https://download.pytorch.org/whl/nightly/cu129 +--extra-index-url https://download.pytorch.org/whl/nightly/cu130 torch>=2.9.0.dev,<2.10.0 --extra-index-url https://pypi.ngc.nvidia.com pyyaml -dllist \ No newline at end of file +dllist +setuptools \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 74547255d0..63ed81f85f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -100,14 +100,19 @@ index-strategy = "unsafe-best-match" [tool.uv.sources] torch = [ - { index = "pytorch-nightly-cu129", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" }, + { index = "pytorch-nightly-cu130", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" }, { index = "jetson-containers", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" }, ] torchvision = [ - { index = "pytorch-nightly-cu129", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" }, + { index = "pytorch-nightly-cu130", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" }, { index = "jetson-containers", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" }, ] +[[tool.uv.index]] +name = "pytorch-nightly-cu130" +url = "https://download.pytorch.org/whl/nightly/cu130" +explicit = false + [[tool.uv.index]] name = "pytorch-nightly-cu129" url = 
"https://download.pytorch.org/whl/nightly/cu129" diff --git a/tests/py/requirements.txt b/tests/py/requirements.txt index 5bfd8a5bcb..6c6cbf27dc 100644 --- a/tests/py/requirements.txt +++ b/tests/py/requirements.txt @@ -3,6 +3,7 @@ expecttest==0.1.6 networkx==2.8.8 numpy +setuptools parameterized>=0.2.0 pytest>=8.2.1 pytest-xdist>=3.6.1 @@ -13,6 +14,6 @@ nvidia-modelopt[all]; python_version >'3.9' and python_version <'3.13' # flashinfer-python is not supported for python version 3.13 or higher # flashinfer-python is broken on python 3.9 at the moment, so skip it for now flashinfer-python; python_version >'3.9' and python_version <'3.13' ---extra-index-url https://download.pytorch.org/whl/nightly/cu129 +--extra-index-url https://download.pytorch.org/whl/nightly/cu130 torchvision>=0.24.0.dev,<0.25.0 timm>=1.0.3 \ No newline at end of file diff --git a/tools/perf/Flux/create_env.sh b/tools/perf/Flux/create_env.sh index 24470be344..330e6c53d6 100644 --- a/tools/perf/Flux/create_env.sh +++ b/tools/perf/Flux/create_env.sh @@ -14,10 +14,10 @@ apt install bazel bazel cd /home/TensorRT -python -m pip install --pre -e . --extra-index-url https://download.pytorch.org/whl/nightly/cu128 -pip install tensorrt==10.9.0.34 --force-reinstall +python -m pip install --pre -e . 
--extra-index-url https://download.pytorch.org/whl/nightly/cu130 +pip install tensorrt==10.13.2.6 --force-reinstall -pip3 install --pre torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu128 +pip3 install --pre torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu130 pip install sentencepiece=="0.2.0" transformers=="4.48.2" accelerate=="1.3.0" diffusers=="0.32.2" protobuf=="5.29.3" diff --git a/uv.lock b/uv.lock index eb2c536573..083ddfa72e 100644 --- a/uv.lock +++ b/uv.lock @@ -42,7 +42,7 @@ version = "1.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub", marker = "sys_platform == 'linux' or sys_platform == 'windows'" }, - { name = "numpy", version = "1.26.4", source = { registry = "https://download.pytorch.org/whl/nightly/cu129" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'windows') or ('tegra' in platform_release and sys_platform == 'linux') or ('tegra' in platform_release and sys_platform == 'windows')" }, + { name = "numpy", version = "1.26.4", source = { registry = "https://download.pytorch.org/whl/nightly/cu130" }, marker = "(python_full_version < '3.10' and sys_platform == 'linux') or (python_full_version < '3.10' and sys_platform == 'windows') or ('tegra' in platform_release and sys_platform == 'linux') or ('tegra' in platform_release and sys_platform == 'windows')" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.10' and 'tegra' not in platform_release and sys_platform == 'linux') or (python_full_version >= '3.10' and 'tegra' not in platform_release and sys_platform == 'windows')" }, { name = "packaging", marker = "sys_platform == 'linux' or sys_platform == 'windows'" }, { name = "psutil", marker = "sys_platform == 'linux' or sys_platform == 'windows'" },