# Bzlmod module declaration for Torch-TensorRT.
# repo_name keeps the legacy WORKSPACE repository name working for consumers.
module(
    name = "torch_tensorrt",
    repo_name = "org_pytorch_tensorrt",
    version = "2.8.0",
)
66
# Direct Bazel registry dependencies (post-upgrade versions).
bazel_dep(name = "googletest", version = "1.16.0")
bazel_dep(name = "platforms", version = "0.0.11")
bazel_dep(name = "rules_cc", version = "0.1.1")
bazel_dep(name = "rules_python", version = "1.3.0")
1111
# rules_python module extension; used below to register a hermetic Python toolchain.
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
1313python .toolchain (
@@ -40,10 +40,16 @@ new_local_repository(
4040 path = "/usr/local/cuda-12.8" ,
4141)
4242
# CUDA toolkit for Jetson/L4T (aarch64) builds.
# NOTE(review): path matches the x86_64 "cuda" repo — presumably both resolve
# to the platform-local CUDA 12.8 install; confirm on an L4T device.
new_local_repository(
    name = "cuda_l4t",
    build_file = "@//third_party/cuda:BUILD",
    path = "/usr/local/cuda-12.8",
)
48+
# CUDA toolkit for Windows builds (default NVIDIA installer location).
new_local_repository(
    name = "cuda_win",
    build_file = "@//third_party/cuda:BUILD",
    path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.8/",
)
4854
4955
# Linux libtorch distribution (nightly, CUDA 12.8, cxx11 ABI).
http_archive(
    name = "libtorch",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "libtorch",
    urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
)
6568
69+
# Windows libtorch distribution (nightly, CUDA 12.8).
http_archive(
    name = "libtorch_win",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "libtorch",
    urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-win-shared-with-deps-latest.zip"],
)
76+
77+
# aarch64 torch wheel, unpacked as a zip so libtorch headers/libs can be
# consumed directly (wheel-based colocation for aarch64 — no separate
# libtorch archive is published for this platform).
http_archive(
    name = "torch_whl",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "torch",
    type = "zip",
    urls = ["https://download.pytorch.org/whl/nightly/cu128/torch-2.8.0.dev20250414%2Bcu128-cp39-cp39-manylinux_2_28_aarch64.whl"],
)
7485
7586# Download these tarballs manually from the NVIDIA website
@@ -94,6 +105,15 @@ http_archive(
94105 ],
95106)
96107
# TensorRT for Jetson/L4T (aarch64).
# NOTE(review): this tarball targets CUDA 12.6 while the cuda_l4t repo points
# at CUDA 12.8 — confirm the intended pairing for L4T builds.
http_archive(
    name = "tensorrt_l4t",
    build_file = "@//third_party/tensorrt/archive:BUILD",
    strip_prefix = "TensorRT-10.3.0.26",
    urls = [
        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
    ],
)
116+
97117http_archive (
98118 name = "tensorrt_win" ,
99119 build_file = "@//third_party/tensorrt/archive:BUILD" ,
@@ -115,32 +135,12 @@ http_archive(
115135# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
116136# for both versions here and do not use --config=pre-cxx11-abi
117137
# new_local_repository(
#     name = "libtorch",
#     path = "/workspace/tensorrt/.venv/lib/python3.10/site-packages/torch",
#     build_file = "third_party/libtorch/BUILD"
# )
144144#new_local_repository(
145145# name = "tensorrt",
146146# path = "/usr/",