diff --git a/.circleci/config.yml b/.circleci/config.yml
index 257ccfaf51..77a1fd036f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -236,7 +236,7 @@ commands:
             mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE
             bazel query 'kind(cc_*, tests(//tests))' --noshow_progress >> /tmp/test_manifest.txt
             circleci tests split < /tmp/test_manifest.txt > /tmp/node_test_manifest.txt
-            bazel test $(cat /tmp/node_test_manifest.txt) --test_arg=--gtest_output=xml:/tmp/artifacts/test_results/ --jobs 4 --config ci_testing --config pre_cxx11_abi --noshow_progress
+            bazel test $(cat /tmp/node_test_manifest.txt) --test_arg=--gtest_output=xml:/tmp/artifacts/test_results/ --jobs 2 --config ci_testing --config pre_cxx11_abi --noshow_progress
       - run:
           name: Collect logs
           when: on_fail
@@ -265,7 +265,6 @@ commands:
             set -e
             mkdir -p /tmp/artifacts/test_results
             cd tests/py
-            pip3 install -r requirements.txt
             pytest --junitxml=/tmp/artifacts/test_results/api/api_test_results.xml api/
             pytest --junitxml=/tmp/artifacts/test_results/integrations/integrations_test_results.xml integrations/
             cd ~/project
@@ -482,12 +481,20 @@ jobs:
           path: /tmp/dist/nightly
           destination: x86_64-pyt-nightly
 
-  test-core-cpp-x86_64-pyt-release:
+  test-core-cpp-x86_64:
     parameters:
       torch-build:
         type: string
       torch-build-index:
         type: string
+      trt-version-short:
+        type: string
+      trt-version-long:
+        type: string
+      cudnn-version:
+        type: string
+      channel:
+        type: string
     machine:
       image: ubuntu-2004-cuda-11.4:202110-01
     resource_class: gpu.nvidia.large
@@ -497,12 +504,12 @@ jobs:
       - create-env:
           os: "ubuntu2004"
          platform: "x86_64"
-          cudnn-version: << pipeline.parameters.cudnn-release-version >>
-          trt-version-short: << pipeline.parameters.trt-release-version-short >>
+          cudnn-version: << parameters.cudnn-version >>
+          trt-version-short: << parameters.trt-version-short >>
           bazel-version: "5.1.1"
           bazel-platform: "x86_64"
       - create-py-env:
-          trt-version-long: << pipeline.parameters.trt-release-version-long >>
+          trt-version-long: << parameters.trt-version-long >>
       - install-torch-from-index:
           torch-build: << parameters.torch-build >>
           torch-build-index: << parameters.torch-build-index >>
@@ -510,53 +517,62 @@ jobs:
           at: /tmp/dist
       - run:
           name: "Install torch-tensorrt"
-          command: pip3 install /tmp/dist/release/*
+          command: pip3 install /tmp/dist/<< parameters.channel >>/*
       - dump-test-env
       - test-ts-core
 
-  test-ts-py-x86_64-pyt-release:
+  test-py-ts-x86_64:
     parameters:
+      channel:
+        type: string
       torch-build:
         type: string
       torch-build-index:
         type: string
+      trt-version-long:
+        type: string
     machine:
       image: ubuntu-2004-cuda-11.4:202110-01
     resource_class: gpu.nvidia.large
     steps:
       - checkout
       - create-py-env:
-          trt-version-long: << pipeline.parameters.trt-release-version-long >>
-      - install-torch-from-index:
-          torch-build: << parameters.torch-build >>
-          torch-build-index: << parameters.torch-build-index >>
+          trt-version-long: << parameters.trt-version-long >>
       - attach_workspace:
           at: /tmp/dist
       - run:
           name: "Install torch-tensorrt"
-          command: pip3 install /tmp/dist/release/*
+          command: pip3 install /tmp/dist/<< parameters.channel >>/*
+      # We install torch after torch-trt because pip automatically enforces the version constraint otherwise
+      - install-torch-from-index:
+          torch-build: << parameters.torch-build >>
+          torch-build-index: << parameters.torch-build-index >>
       - dump-test-env
       - test-ts-py-api
 
-  test-x86_64-pyt-nightly:
+  test-py-fx-x86_64:
     parameters:
+      channel:
+        type: string
       torch-build:
         type: string
       torch-build-index:
         type: string
+      trt-version-long:
+        type: string
     machine:
       image: ubuntu-2004-cuda-11.4:202110-01
     resource_class: gpu.nvidia.large
     steps:
       - checkout
      - create-py-env:
-          trt-version-long: << pipeline.parameters.trt-nightly-version-long >>
+          trt-version-long: << parameters.trt-version-long >>
       - attach_workspace:
           at: /tmp/dist/
       - run:
           name: "Install torch-tensorrt"
-          command: pip3 install /tmp/dist/nightly/*
-      # We install torch after torch-trt because pip automatically enforces the version constraint otherwise, swap back after versions are synced
+          command: pip3 install /tmp/dist/<< parameters.channel >>/*
+      # We install torch after torch-trt because pip automatically enforces the version constraint otherwise
       - install-torch-from-index:
           torch-build: << parameters.torch-build >>
           torch-build-index: << parameters.torch-build-index >>
@@ -573,30 +589,30 @@ parameters:
     default: "https://download.pytorch.org/whl/nightly/cu113"
   cudnn-nightly-version:
     type: string
-    default: "8.2.1"
+    default: "8.4.1"
   trt-nightly-version-short:
     type: string
-    default: "8.2.4"
+    default: "8.4.1"
   trt-nightly-version-long:
     type: string
-    default: "8.2.4.2"
+    default: "8.4.1.5"
 
   # Release platform config
   torch-release-build:
     type: string
-    default: "1.11.0+cu113"
+    default: "1.12.0+cu113"
   torch-release-build-index:
     type: string
     default: "https://download.pytorch.org/whl/cu113"
   cudnn-release-version:
     type: string
-    default: "8.2.1"
+    default: "8.4.1"
   trt-release-version-short:
     type: string
-    default: "8.2.4"
+    default: "8.4.1"
   trt-release-version-long:
     type: string
-    default: "8.2.4.2"
+    default: "8.4.1.5"
 
   # Jetson platform config
   torch-jetson-build:
@@ -632,21 +648,74 @@ workflows:
           jetpack-version: << pipeline.parameters.jetpack-version >>
           python-version: 3.8.10
 
+
+
       - build-x86_64-pyt-release:
           torch-build: << pipeline.parameters.torch-release-build >>
           torch-build-index: << pipeline.parameters.torch-release-build-index >>
-      - test-core-cpp-x86_64-pyt-release:
+
+      - test-core-cpp-x86_64:
+          name: test-core-cpp-x86_64-pyt-release
+          channel: "release"
           torch-build: << pipeline.parameters.torch-release-build >>
           torch-build-index: << pipeline.parameters.torch-release-build-index >>
+          trt-version-short: << pipeline.parameters.trt-release-version-short >>
+          trt-version-long: << pipeline.parameters.trt-release-version-long >>
+          cudnn-version: << pipeline.parameters.cudnn-release-version >>
           requires:
             - build-x86_64-pyt-release
 
+      - test-py-ts-x86_64:
+          name: test-py-ts-x86_64-pyt-release
+          channel: "release"
+          torch-build: << pipeline.parameters.torch-release-build >>
+          torch-build-index: << pipeline.parameters.torch-release-build-index >>
+          trt-version-long: << pipeline.parameters.trt-release-version-long >>
+          requires:
+            - build-x86_64-pyt-release
+
+      - test-py-ts-x86_64:
+          name: test-py-fx-x86_64-pyt-release
+          channel: "release"
+          torch-build: << pipeline.parameters.torch-release-build >>
+          torch-build-index: << pipeline.parameters.torch-release-build-index >>
+          trt-version-long: << pipeline.parameters.trt-release-version-long >>
+          requires:
+            - build-x86_64-pyt-release
+
+
+
       - build-x86_64-pyt-nightly:
           torch-build: << pipeline.parameters.torch-nightly-build >>
           torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
-      - test-x86_64-pyt-nightly:
+
+      - test-core-cpp-x86_64:
+          name: test-core-cpp-x86_64-pyt-nightly
+          channel: "nightly"
+          torch-build: << pipeline.parameters.torch-nightly-build >>
+          torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
+          trt-version-short: << pipeline.parameters.trt-nightly-version-short >>
+          trt-version-long: << pipeline.parameters.trt-nightly-version-long >>
+          cudnn-version: << pipeline.parameters.cudnn-nightly-version >>
+          requires:
+            - build-x86_64-pyt-nightly
+
+      - test-py-ts-x86_64:
+          name: test-py-ts-x86_64-pyt-nightly
+          channel: "nightly"
           torch-build: << pipeline.parameters.torch-nightly-build >>
           torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
+          trt-version-long: << pipeline.parameters.trt-nightly-version-long >>
+          requires:
+            - build-x86_64-pyt-nightly
+
+      - test-py-fx-x86_64:
+          name: test-py-fx-x86_64-pyt-nightly
+          channel: "nightly"
+          torch-build: << pipeline.parameters.torch-nightly-build >>
+          torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
+          trt-version-long: << pipeline.parameters.trt-nightly-version-long >>
           requires:
             - build-x86_64-pyt-nightly
 
@@ -657,26 +726,73 @@ workflows:
           jetpack-version: << pipeline.parameters.jetpack-version >>
           python-version: 3.8.10
 
+
+
       - build-x86_64-pyt-release:
           torch-build: << pipeline.parameters.torch-release-build >>
           torch-build-index: << pipeline.parameters.torch-release-build-index >>
-      - test-core-cpp-x86_64-pyt-release:
+
+      - test-core-cpp-x86_64:
+          name: test-core-cpp-x86_64-pyt-release
+          channel: "release"
+          torch-build: << pipeline.parameters.torch-release-build >>
+          torch-build-index: << pipeline.parameters.torch-release-build-index >>
+          trt-version-short: << pipeline.parameters.trt-release-version-short >>
+          trt-version-long: << pipeline.parameters.trt-release-version-long >>
+          cudnn-version: << pipeline.parameters.cudnn-release-version >>
+          requires:
+            - build-x86_64-pyt-release
+
+      - test-py-ts-x86_64:
+          name: test-py-ts-x86_64-pyt-release
+          channel: "release"
           torch-build: << pipeline.parameters.torch-release-build >>
           torch-build-index: << pipeline.parameters.torch-release-build-index >>
+          trt-version-long: << pipeline.parameters.trt-release-version-long >>
           requires:
             - build-x86_64-pyt-release
-      - test-ts-py-x86_64-pyt-release:
+
+      - test-py-ts-x86_64:
+          name: test-py-fx-x86_64-pyt-release
+          channel: "release"
           torch-build: << pipeline.parameters.torch-release-build >>
           torch-build-index: << pipeline.parameters.torch-release-build-index >>
+          trt-version-long: << pipeline.parameters.trt-release-version-long >>
           requires:
             - build-x86_64-pyt-release
 
+
       - build-x86_64-pyt-nightly:
           torch-build: << pipeline.parameters.torch-nightly-build >>
           torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
-      - test-x86_64-pyt-nightly:
+
+      - test-core-cpp-x86_64:
+          name: test-core-cpp-x86_64-pyt-nightly
+          channel: "nightly"
           torch-build: << pipeline.parameters.torch-nightly-build >>
           torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
+          trt-version-short: << pipeline.parameters.trt-nightly-version-short >>
+          trt-version-long: << pipeline.parameters.trt-nightly-version-long >>
+          cudnn-version: << pipeline.parameters.cudnn-nightly-version >>
+          requires:
+            - build-x86_64-pyt-nightly
+
+      - test-py-ts-x86_64:
+          name: test-py-ts-x86_64-pyt-nightly
+          channel: "nightly"
+          torch-build: << pipeline.parameters.torch-nightly-build >>
+          torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
+          trt-version-long: << pipeline.parameters.trt-nightly-version-long >>
+          requires:
+            - build-x86_64-pyt-nightly
+
+      - test-py-fx-x86_64:
+          name: test-py-fx-x86_64-pyt-nightly
+          channel: "nightly"
+          torch-build: << pipeline.parameters.torch-nightly-build >>
+          torch-build-index: << pipeline.parameters.torch-nightly-build-index >>
+          trt-version-long: << pipeline.parameters.trt-nightly-version-long >>
           requires:
             - build-x86_64-pyt-nightly
 
diff --git a/py/setup.py b/py/setup.py
index a9913084ec..f11e348b02 100644
--- a/py/setup.py
+++ b/py/setup.py
@@ -22,9 +22,13 @@
 JETPACK_VERSION = None
 
-__version__ = '1.2.0a0'
 
 FX_ONLY = False
 
+__version__ = '1.2.0a0'
+__cuda_version__ = '11.3'
+__cudnn_version__ = '8.2'
+__tensorrt_version__ = '8.2'
+
 def get_git_revision_short_hash() -> str:
     return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
 
@@ -51,8 +55,10 @@ def get_git_revision_short_hash() -> str:
             JETPACK_VERSION = "4.5"
         elif version == "4.6":
             JETPACK_VERSION = "4.6"
+        elif version == "5.0":
+            JETPACK_VERSION = "4.6"
 
 if not JETPACK_VERSION:
-    warnings.warn("Assuming jetpack version to be 4.6, if not use the --jetpack-version option")
+    warnings.warn("Assuming jetpack version to be 4.6 or greater, if not use the --jetpack-version option")
     JETPACK_VERSION = "4.6"
 
@@ -103,7 +109,7 @@ def build_libtorchtrt_pre_cxx11_abi(develop=True, use_dist_dir=True, cxx11_abi=F
         print("Jetpack version: 4.5")
     elif JETPACK_VERSION == "4.6":
         cmd.append("--platforms=//toolchains:jetpack_4.6")
-        print("Jetpack version: 4.6")
+        print("Jetpack version: >=4.6")
 
     print("building libtorchtrt")
     status_code = subprocess.run(cmd).returncode
@@ -118,7 +124,10 @@ def gen_version_file():
 
     with open(dir_path + '/torch_tensorrt/_version.py', 'w') as f:
         print("creating version file")
-        f.write("__version__ = \"" + __version__ + '\"')
+        f.write("__version__ = \"" + __version__ + '\"\n')
+        f.write("__cuda_version__ = \"" + __cuda_version__ + '\"\n')
+        f.write("__cudnn_version__ = \"" + __cudnn_version__ + '\"\n')
+        f.write("__tensorrt_version__ = \"" + __tensorrt_version__ + '\"\n')
 
 
 def copy_libtorchtrt(multilinux=False):
@@ -299,7 +308,7 @@ def run(self):
     long_description=long_description,
     ext_modules=ext_modules,
     install_requires=[
-        'torch>=1.11.0+cu113,<1.12.0',
+        'torch>=1.12.0+cu113,<1.13.0',
     ],
     setup_requires=[],
     cmdclass={
diff --git a/py/torch_tensorrt/__init__.py b/py/torch_tensorrt/__init__.py
index 5bf7e0e334..59705c3c1f 100644
--- a/py/torch_tensorrt/__init__.py
+++ b/py/torch_tensorrt/__init__.py
@@ -1,13 +1,87 @@
+import ctypes
+import glob
 import os
 import sys
+import platform
+import warnings
+from torch_tensorrt._version import __version__, __cuda_version__, __cudnn_version__, __tensorrt_version__
+
 
 if sys.version_info < (3,):
     raise Exception("Python 2 has reached end-of-life and is not supported by Torch-TensorRT")
 
-import ctypes
+def _parse_semver(version):
+    split = version.split(".")
+    if len(split) < 3:
+        split.append("")
+
+    return {
+        "major": split[0],
+        "minor": split[1],
+        "patch": split[2]
+    }
+
+def _find_lib(name, paths):
+    for path in paths:
+        libpath = os.path.join(path, name)
+        if os.path.isfile(libpath):
+            return libpath
+
+    raise FileNotFoundError(
+        f"Could not find {name}\n Search paths: {paths}"
+    )
+
+try:
+    import tensorrt
+except:
+    cuda_version = _parse_semver(__cuda_version__)
+    cudnn_version = _parse_semver(__cudnn_version__)
+    tensorrt_version = _parse_semver(__tensorrt_version__)
+
+    CUDA_MAJOR = cuda_version["major"]
+    CUDNN_MAJOR = cudnn_version["major"]
+    TENSORRT_MAJOR = tensorrt_version["major"]
+
+    if sys.platform.startswith("win"):
+        WIN_LIBS = [
+            "nvinfer.dll",
+            "nvinfer_plugin.dll",
+        ]
+
+        WIN_PATHS = os.environ["PATH"].split(os.path.pathsep)
+
+
+        for lib in WIN_LIBS:
+            ctypes.CDLL(_find_lib(lib, WIN_PATHS))
+
+    elif sys.platform.startswith("linux"):
+        LINUX_PATHS = [
+            "/usr/local/cuda/lib64",
+        ]
+
+        if "LD_LIBRARY_PATH" in os.environ:
+            LINUX_PATHS += os.environ["LD_LIBRARY_PATH"].split(os.path.pathsep)
+
+        if platform.uname().processor == "x86_64":
+            LINUX_PATHS += [
+                "/usr/lib/x86_64-linux-gnu",
+            ]
+
+        elif platform.uname().processor == "aarch64":
+            LINUX_PATHS += [
+                "/usr/lib/aarch64-linux-gnu"
+            ]
+
+        LINUX_LIBS = [
+            f"libnvinfer.so.{TENSORRT_MAJOR}",
+            f"libnvinfer_plugin.so.{TENSORRT_MAJOR}",
+        ]
+
+        for lib in LINUX_LIBS:
+            ctypes.CDLL(_find_lib(lib, LINUX_PATHS))
+
 import torch
 
-from torch_tensorrt._version import __version__
 from torch_tensorrt._compile import *
 from torch_tensorrt._util import *
 from torch_tensorrt import ts
diff --git a/py/torch_tensorrt/_compile.py b/py/torch_tensorrt/_compile.py
index c6550ae7c7..b0eb8f3b97 100644
--- a/py/torch_tensorrt/_compile.py
+++ b/py/torch_tensorrt/_compile.py
@@ -5,9 +5,9 @@ import torch
 import torch.fx
 from enum import Enum
-import torch_tensorrt.fx
-from torch_tensorrt.fx.lower import lower_to_trt
-from torch_tensorrt.fx.utils import LowerPrecision
+#import torch_tensorrt.fx
+#from torch_tensorrt.fx.lower import lower_to_trt
+#from torch_tensorrt.fx.utils import LowerPrecision
 
 
 class _IRType(Enum):
     """Enum to set the minimum required logging level to print a message to stdout
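
Context for the setup.py hunks above: gen_version_file() now records the CUDA, cuDNN, and TensorRT versions next to __version__ in torch_tensorrt/_version.py, and the new __init__.py reads them back through _parse_semver() to decide which libnvinfer major version to preload. A minimal sketch of that round trip follows; it is illustrative only, the file path and helper names below are not part of the patch, and it assumes the version strings hard-coded in this diff ('1.2.0a0', '11.3', '8.2', '8.2').

    # Illustrative sketch -- mirrors gen_version_file() and _parse_semver() from this diff.
    # The literal version strings are the ones hard-coded in py/setup.py.

    def parse_semver(version):
        # Same padding trick as _parse_semver(): "11.3" -> major "11", minor "3", patch ""
        split = version.split(".")
        if len(split) < 3:
            split.append("")
        return {"major": split[0], "minor": split[1], "patch": split[2]}


    def write_version_file(path="/tmp/_version.py"):
        # Mirrors gen_version_file(): one assignment per line, newline-terminated.
        with open(path, "w") as f:
            f.write('__version__ = "1.2.0a0"\n')
            f.write('__cuda_version__ = "11.3"\n')
            f.write('__cudnn_version__ = "8.2"\n')
            f.write('__tensorrt_version__ = "8.2"\n')


    if __name__ == "__main__":
        write_version_file()
        trt = parse_semver("8.2")
        # The major component is what __init__.py interpolates into "libnvinfer.so.{major}".
        print(f"would preload libnvinfer.so.{trt['major']}")  # -> libnvinfer.so.8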
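The __init__.py change makes `import torch_tensorrt` fall back to loading the TensorRT runtime libraries directly with ctypes when the `tensorrt` Python package is not importable. The standalone sketch below mirrors that lookup for Linux/x86_64 only; the search paths and the major version `8` are assumptions taken from the values in this diff, not a definitive list.

    import ctypes
    import os
    import sys

    # Assumed search paths, matching the Linux x86_64 branch of the patch.
    SEARCH_PATHS = ["/usr/local/cuda/lib64", "/usr/lib/x86_64-linux-gnu"]
    if "LD_LIBRARY_PATH" in os.environ:
        SEARCH_PATHS += os.environ["LD_LIBRARY_PATH"].split(os.path.pathsep)

    def find_lib(name, paths):
        # Same behaviour as _find_lib(): first match wins, FileNotFoundError otherwise.
        for path in paths:
            candidate = os.path.join(path, name)
            if os.path.isfile(candidate):
                return candidate
        raise FileNotFoundError(f"Could not find {name}\n Search paths: {paths}")

    if __name__ == "__main__":
        for lib in ("libnvinfer.so.8", "libnvinfer_plugin.so.8"):  # major version assumed to be 8
            try:
                ctypes.CDLL(find_lib(lib, SEARCH_PATHS))
                print(f"loaded {lib}")
            except (FileNotFoundError, OSError) as err:
                print(f"skipping {lib}: {err}", file=sys.stderr)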
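The _compile.py hunk disables the FX frontend by commenting out its imports. As a design note, one alternative (not what this patch does) is to guard the import and expose a flag so the TorchScript path keeps working when torch_tensorrt.fx or its dependencies are missing; `FX_AVAILABLE` and `compile_with_fx` below are hypothetical names used only for illustration.

    # Hypothetical sketch -- NOT the approach taken in the patch above,
    # which simply comments the fx imports out.
    try:
        import torch_tensorrt.fx
        from torch_tensorrt.fx.lower import lower_to_trt
        from torch_tensorrt.fx.utils import LowerPrecision
        FX_AVAILABLE = True
    except ImportError:
        FX_AVAILABLE = False

    def compile_with_fx(module, inputs):
        # Callers check the flag instead of failing at import time.
        if not FX_AVAILABLE:
            raise RuntimeError("torch_tensorrt.fx is not available in this build")
        return lower_to_trt(module, inputs)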