@@ -4,10 +4,10 @@ module(
4
4
version = "${BUILD_VERSION}"
5
5
)
6
6
7
- bazel_dep(name = "googletest", version = "1.14.0")
8
- bazel_dep(name = "platforms", version = "0.0.10")
9
- bazel_dep(name = "rules_cc", version = "0.0.9")
10
- bazel_dep(name = "rules_python", version = "0.34.0")
7
+ bazel_dep(name = "googletest", version = "1.16.0")
8
+ bazel_dep(name = "platforms", version = "0.0.11")
9
+ bazel_dep(name = "rules_cc", version = "0.1.1")
10
+ bazel_dep(name = "rules_python", version = "1.3.0")
11
11
12
12
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
13
13
python.toolchain(
@@ -27,7 +27,7 @@ local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl"
27
27
# External dependency for torch_tensorrt if you already have precompiled binaries.
28
28
local_repository(
29
29
name = "torch_tensorrt",
30
- path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
30
+ path = "/opt/conda/lib/python3.10/site-packages/torch_tensorrt",
31
31
)
32
32
33
33
@@ -40,6 +40,15 @@ new_local_repository(
40
40
path = "${CUDA_HOME}",
41
41
)
42
42
43
+ # Server Arm (SBSA) and Jetson Jetpack (L4T) use different versions of CUDA and TensorRT
44
+ # These versions can be selected using the flag `--//toolchains/dep_collection:compute_libs="jetpack"`
45
+
46
+ new_local_repository(
47
+ name = "cuda_l4t",
48
+ build_file = "@//third_party/cuda:BUILD",
49
+ path = "/usr/local/cuda-12.6",
50
+ )
51
+
43
52
new_local_repository(
44
53
name = "cuda_win",
45
54
build_file = "@//third_party/cuda:BUILD",
@@ -53,12 +62,31 @@ http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "ht
53
62
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
54
63
#############################################################################################################
55
64
56
- http_archive(
57
- name = "libtorch",
58
- build_file = "@//third_party/libtorch:BUILD",
59
- strip_prefix = "libtorch",
60
- urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
61
- )
65
+ # http_archive(
66
+ # name = "libtorch",
67
+ # build_file = "@//third_party/libtorch:BUILD",
68
+ # strip_prefix = "libtorch",
69
+ # urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
70
+ # )
71
+
72
+ # http_archive(
73
+ # name = "libtorch_win",
74
+ # build_file = "@//third_party/libtorch:BUILD",
75
+ # strip_prefix = "libtorch",
76
+ # urls = ["https://download.pytorch.org/libtorch//${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
77
+ # )
78
+
79
+
80
+ # It is possible to specify a wheel file to use as the libtorch source by providing the URL below and
81
+ # using the build flag `--//toolchains/dep_src:torch="whl"`
82
+
83
+ # http_archive(
84
+ # name = "torch_whl",
85
+ # build_file = "@//third_party/libtorch:BUILD",
86
+ # strip_prefix = "torch",
87
+ # type = "zip",
88
+ # urls = ["file:///${TORCH_WHL_PATH}"],
89
+ # )
62
90
63
91
# Download these tarballs manually from the NVIDIA website
64
92
# Either place them in the distdir directory in third_party and use the --distdir flag
@@ -73,6 +101,24 @@ http_archive(
73
101
],
74
102
)
75
103
104
+ http_archive(
105
+ name = "tensorrt_sbsa",
106
+ build_file = "@//third_party/tensorrt/archive:BUILD",
107
+ strip_prefix = "TensorRT-10.9.0.34",
108
+ urls = [
109
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.9.0/tars/TensorRT-10.9.0.34.Linux.aarch64-gnu.cuda-12.8.tar.gz",
110
+ ],
111
+ )
112
+
113
+ http_archive(
114
+ name = "tensorrt_l4t",
115
+ build_file = "@//third_party/tensorrt/archive:BUILD",
116
+ strip_prefix = "TensorRT-10.3.0.26",
117
+ urls = [
118
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
119
+ ],
120
+ )
121
+
76
122
http_archive(
77
123
name = "tensorrt_win",
78
124
build_file = "@//third_party/tensorrt/archive:BUILD",
@@ -95,13 +141,13 @@ http_archive(
95
141
# for both versions here and do not use --config=pre-cxx11-abi
96
142
97
143
new_local_repository(
98
- name = "libtorch_win",
144
+ name = "libtorch",
99
145
path = "${TORCH_INSTALL_PATH}",
100
146
build_file = "third_party/libtorch/BUILD"
101
147
)
102
148
103
149
new_local_repository(
104
- name = "libtorch_pre_cxx11_abi",
150
+ name = "libtorch_win",
105
151
path = "${TORCH_INSTALL_PATH}",
106
152
build_file = "third_party/libtorch/BUILD"
107
153
)
0 commit comments