Skip to content

Commit f85c6ad

Browse files
committed
infra: CI updates
1 parent 98520db commit f85c6ad

File tree

5 files changed

+118
-81
lines changed

5 files changed

+118
-81
lines changed

.gitignore

+2-1
Original file line number | Diff line number | Diff line change
@@ -74,4 +74,5 @@ tests/py/dynamo/models/*.ts
7474
tests/py/dynamo/models/*.ep
7575
*.deb
7676
*.tar.xz
77-
MODULE.bazel.lock
77+
MODULE.bazel.lock
78+
*.whl

MODULE.bazel

+7-8
Original file line number | Diff line number | Diff line change
@@ -69,7 +69,6 @@ http_archive(
6969
urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
7070
)
7171

72-
7372
http_archive(
7473
name = "libtorch_win",
7574
build_file = "@//third_party/libtorch:BUILD",
@@ -81,13 +80,13 @@ http_archive(
8180
# It is possible to specify a wheel file to use as the libtorch source by providing the URL below and
8281
# using the build flag `--//toolchains/dep_src:torch="whl"`
8382

84-
http_archive(
85-
name = "torch_whl",
86-
build_file = "@//third_party/libtorch:BUILD",
87-
strip_prefix = "torch",
88-
type = "zip",
89-
urls = ["https://pypi.jetson-ai-lab.dev/jp6/cu126/+f/52c/2cbdd62b78f32/torch-2.7.0-cp310-cp310-linux_aarch64.whl#sha256=52c2cbdd62b78f32c51fa178212e4721241a2ba9e0c4d7d690dd808bd890d51b"],
90-
)
83+
# http_archive(
84+
# name = "torch_whl",
85+
# build_file = "@//third_party/libtorch:BUILD",
86+
# strip_prefix = "torch",
87+
# type = "zip",
88+
# urls = ["https://pypi.jetson-ai-lab.dev/jp6/cu126/+f/52c/2cbdd62b78f32/torch-2.7.0-cp310-cp310-linux_aarch64.whl#sha256=52c2cbdd62b78f32c51fa178212e4721241a2ba9e0c4d7d690dd808bd890d51b"],
89+
# )
9190

9291
# Download these tarballs manually from the NVIDIA website
9392
# Either place them in the distdir directory in third_party and use the --distdir flag

pyproject.toml

+4-4
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,7 @@ requires = [
88
"cffi>=1.15.1",
99
"typing-extensions>=4.7.0",
1010
"future>=0.18.3",
11-
"tensorrt->=10.9.0,<10.10.0",
11+
"tensorrt-cu12>=10.9.0,<10.10.0",
1212
"torch>=2.8.0.dev,<2.9.0",
1313
"pybind11==2.6.2",
1414
"numpy",
@@ -57,9 +57,9 @@ keywords = [
5757
dependencies = [
5858
"torch>=2.8.0.dev,<2.9.0",
5959
"tensorrt>=10.9.0,<10.10.0",
60-
"tensorrt->=10.9.0,<10.10.0",
61-
"tensorrt--bindings>=10.9.0,<10.10.0",
62-
"tensorrt--libs>=10.9.0,<10.10.0",
60+
"tensorrt-cu12>=10.9.0,<10.10.0",
61+
"tensorrt-cu12-bindings>=10.9.0,<10.10.0",
62+
"tensorrt-cu12-libs>=10.9.0,<10.10.0",
6363
"packaging>=23",
6464
"numpy",
6565
"typing-extensions>=4.7.0",

setup.py

+46-55
Original file line number | Diff line number | Diff line change
@@ -79,12 +79,13 @@ def load_dep_info():
7979
dir_path = os.path.join(str(get_root_dir()), "py")
8080

8181
PRE_CXX11_ABI = False
82-
JETPACK_VERSION = None
82+
IS_JETPACK = False
8383
PY_ONLY = False
8484
NO_TS = False
8585
LEGACY = False
8686
RELEASE = False
8787
CI_BUILD = False
88+
IS_SBSA = False
8889

8990
if "--fx-only" in sys.argv:
9091
PY_ONLY = True
@@ -136,40 +137,12 @@ def load_dep_info():
136137
if ci_env_var == "1":
137138
CI_BUILD = True
138139

139-
if "--use-pre-cxx11-abi" in sys.argv:
140-
sys.argv.remove("--use-pre-cxx11-abi")
141-
PRE_CXX11_ABI = True
142-
143-
if (pre_cxx11_abi_env_var := os.environ.get("USE_PRE_CXX11_ABI")) is not None:
144-
if pre_cxx11_abi_env_var == "1":
145-
PRE_CXX11_ABI = True
146-
147140
if platform.uname().processor == "aarch64":
148-
if "--jetpack-version" in sys.argv:
149-
version_idx = sys.argv.index("--jetpack-version") + 1
150-
version = sys.argv[version_idx]
151-
sys.argv.remove(version)
152-
sys.argv.remove("--jetpack-version")
153-
if version == "4.5":
154-
JETPACK_VERSION = "4.5"
155-
elif version == "4.6":
156-
JETPACK_VERSION = "4.6"
157-
elif version == "5.0":
158-
JETPACK_VERSION = "5.0"
159-
elif version == "6.1":
160-
JETPACK_VERSION = "6.1"
161-
162-
if not JETPACK_VERSION:
163-
warnings.warn(
164-
"Assuming jetpack version to be 6.1, if not use the --jetpack-version option"
165-
)
166-
JETPACK_VERSION = "6.1"
167-
168-
if PRE_CXX11_ABI:
169-
warnings.warn(
170-
"Jetson platform detected. Please remove --use-pre-cxx11-abi flag if you are using it."
171-
)
172-
141+
if "--jetpack" in sys.argv:
142+
sys.argv.remove("--jetpack")
143+
IS_JETPACK = True
144+
else:
145+
IS_SBSA = True
173146

174147
BAZEL_EXE = None
175148
if not PY_ONLY:
@@ -204,30 +177,13 @@ def build_libtorchtrt_cxx11_abi(
204177
if target_python:
205178
cmd.append("--config=python")
206179

207-
if pre_cxx11_abi:
208-
cmd.append("--config=pre_cxx11_abi")
209-
print("using PRE CXX11 ABI build")
210-
else:
211-
cmd.append("--config=cxx11_abi")
212-
print("using CXX11 ABI build")
213-
214180
if IS_WINDOWS:
215181
cmd.append("--config=windows")
216182
else:
217183
cmd.append("--config=linux")
218184

219-
if JETPACK_VERSION == "4.5":
220-
cmd.append("--platforms=//toolchains:jetpack_4.5")
221-
print("Jetpack version: 4.5")
222-
elif JETPACK_VERSION == "4.6":
223-
cmd.append("--platforms=//toolchains:jetpack_4.6")
224-
print("Jetpack version: 4.6")
225-
elif JETPACK_VERSION == "5.0":
226-
cmd.append("--platforms=//toolchains:jetpack_5.0")
227-
print("Jetpack version: 5.0")
228-
elif JETPACK_VERSION == "6.1":
229-
cmd.append("--platforms=//toolchains:jetpack_6.1")
230-
print("Jetpack version: 6.1")
185+
if IS_JETPACK:
186+
cmd.append("--config=jetpack")
231187

232188
if CI_BUILD:
233189
cmd.append("--platforms=//toolchains:ci_rhel_x86_64_linux")
@@ -497,17 +453,52 @@ def run(self):
497453
package_data = {}
498454

499455
if not (PY_ONLY or NO_TS):
500-
tensorrt_linux_external_dir = (
456+
tensorrt_windows_external_dir = (
457+
lambda: subprocess.check_output(
458+
[BAZEL_EXE, "query", "@tensorrt_win//:nvinfer", "--output", "location"]
459+
)
460+
.decode("ascii")
461+
.strip()
462+
.split("/BUILD.bazel")[0]
463+
)
464+
465+
tensorrt_x86_64_external_dir = (
501466
lambda: subprocess.check_output(
502467
[BAZEL_EXE, "query", "@tensorrt//:nvinfer", "--output", "location"]
503468
)
504469
.decode("ascii")
505470
.strip()
506471
.split("/BUILD.bazel")[0]
507472
)
473+
474+
tensorrt_sbsa_external_dir = (
475+
lambda: subprocess.check_output(
476+
[BAZEL_EXE, "query", "@tensorrt_sbsa//:nvinfer", "--output", "location"]
477+
)
478+
.decode("ascii")
479+
.strip()
480+
.split("/BUILD.bazel")[0]
481+
)
482+
483+
tensorrt_jetpack_external_dir = (
484+
lambda: subprocess.check_output(
485+
[BAZEL_EXE, "query", "@tensorrt_l4t//:nvinfer", "--output", "location"]
486+
)
487+
.decode("ascii")
488+
.strip()
489+
.split("/BUILD.bazel")[0]
490+
)
491+
492+
if IS_SBSA:
493+
tensorrt_linux_external_dir = tensorrt_sbsa_external_dir()
494+
elif IS_JETPACK:
495+
tensorrt_linux_external_dir = tensorrt_jetpack_external_dir()
496+
else:
497+
tensorrt_linux_external_dir = tensorrt_x86_64_external_dir()
498+
508499
tensorrt_windows_external_dir = (
509500
lambda: subprocess.check_output(
510-
[BAZEL_EXE, "query", "@tensorrt_win//:nvinfer", "--output", "location"]
501+
[BAZEL_EXE, "query", "@tensorrt_windows//:nvinfer", "--output", "location"]
511502
)
512503
.decode("ascii")
513504
.strip()

toolchains/ci_workspaces/MODULE.bazel.tmpl

+59-13
Original file line number | Diff line number | Diff line change
@@ -4,10 +4,10 @@ module(
44
version = "${BUILD_VERSION}"
55
)
66

7-
bazel_dep(name = "googletest", version = "1.14.0")
8-
bazel_dep(name = "platforms", version = "0.0.10")
9-
bazel_dep(name = "rules_cc", version = "0.0.9")
10-
bazel_dep(name = "rules_python", version = "0.34.0")
7+
bazel_dep(name = "googletest", version = "1.16.0")
8+
bazel_dep(name = "platforms", version = "0.0.11")
9+
bazel_dep(name = "rules_cc", version = "0.1.1")
10+
bazel_dep(name = "rules_python", version = "1.3.0")
1111

1212
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
1313
python.toolchain(
@@ -27,7 +27,7 @@ local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl"
2727
# External dependency for torch_tensorrt if you already have precompiled binaries.
2828
local_repository(
2929
name = "torch_tensorrt",
30-
path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
30+
path = "/opt/conda/lib/python3.10/site-packages/torch_tensorrt",
3131
)
3232

3333

@@ -40,6 +40,15 @@ new_local_repository(
4040
path = "${CUDA_HOME}",
4141
)
4242

43+
# Server Arm (SBSA) and Jetson Jetpack (L4T) use different versions of CUDA and TensorRT
44+
# These versions can be selected using the flag `--//toolchains/dep_collection:compute_libs="jetpack"`
45+
46+
new_local_repository(
47+
name = "cuda_l4t",
48+
build_file = "@//third_party/cuda:BUILD",
49+
path = "/usr/local/cuda-12.6",
50+
)
51+
4352
new_local_repository(
4453
name = "cuda_win",
4554
build_file = "@//third_party/cuda:BUILD",
@@ -53,12 +62,31 @@ http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "ht
5362
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
5463
#############################################################################################################
5564

56-
http_archive(
57-
name = "libtorch",
58-
build_file = "@//third_party/libtorch:BUILD",
59-
strip_prefix = "libtorch",
60-
urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
61-
)
65+
# http_archive(
66+
# name = "libtorch",
67+
# build_file = "@//third_party/libtorch:BUILD",
68+
# strip_prefix = "libtorch",
69+
# urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
70+
# )
71+
72+
# http_archive(
73+
# name = "libtorch_win",
74+
# build_file = "@//third_party/libtorch:BUILD",
75+
# strip_prefix = "libtorch",
76+
# urls = ["https://download.pytorch.org/libtorch//${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
77+
# )
78+
79+
80+
# It is possible to specify a wheel file to use as the libtorch source by providing the URL below and
81+
# using the build flag `--//toolchains/dep_src:torch="whl"`
82+
83+
# http_archive(
84+
# name = "torch_whl",
85+
# build_file = "@//third_party/libtorch:BUILD",
86+
# strip_prefix = "torch",
87+
# type = "zip",
88+
# urls = ["file:///${TORCH_WHL_PATH}"],
89+
# )
6290

6391
# Download these tarballs manually from the NVIDIA website
6492
# Either place them in the distdir directory in third_party and use the --distdir flag
@@ -73,6 +101,24 @@ http_archive(
73101
],
74102
)
75103

104+
http_archive(
105+
name = "tensorrt_sbsa",
106+
build_file = "@//third_party/tensorrt/archive:BUILD",
107+
strip_prefix = "TensorRT-10.9.0.34",
108+
urls = [
109+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.9.0/tars/TensorRT-10.9.0.34.Linux.aarch64-gnu.cuda-12.8.tar.gz",
110+
],
111+
)
112+
113+
http_archive(
114+
name = "tensorrt_l4t",
115+
build_file = "@//third_party/tensorrt/archive:BUILD",
116+
strip_prefix = "TensorRT-10.3.0.26",
117+
urls = [
118+
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
119+
],
120+
)
121+
76122
http_archive(
77123
name = "tensorrt_win",
78124
build_file = "@//third_party/tensorrt/archive:BUILD",
@@ -95,13 +141,13 @@ http_archive(
95141
# for both versions here and do not use --config=pre-cxx11-abi
96142

97143
new_local_repository(
98-
name = "libtorch_win",
144+
name = "libtorch",
99145
path = "${TORCH_INSTALL_PATH}",
100146
build_file = "third_party/libtorch/BUILD"
101147
)
102148

103149
new_local_repository(
104-
name = "libtorch_pre_cxx11_abi",
150+
name = "libtorch_win",
105151
path = "${TORCH_INSTALL_PATH}",
106152
build_file = "third_party/libtorch/BUILD"
107153
)

0 commit comments

Comments
 (0)