1
1
# Bazel module metadata for Torch-TensorRT (bzlmod).
# repo_name preserves the legacy WORKSPACE repository label used by BUILD files.
module(
    name = "torch_tensorrt",
    repo_name = "org_pytorch_tensorrt",
    version = "2.8.0",
)
6
6
7
# Direct module dependencies from the Bazel Central Registry.
# Versions reflect the post-upgrade state (googletest 1.16.0, platforms 0.0.11,
# rules_cc 0.1.1, rules_python 1.3.0).
bazel_dep(name = "googletest", version = "1.16.0")
bazel_dep(name = "platforms", version = "0.0.11")
bazel_dep(name = "rules_cc", version = "0.1.1")
bazel_dep(name = "rules_python", version = "1.3.0")
11
11
12
12
# Load the rules_python module extension used to register Python toolchains below.
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
13
13
python .toolchain (
@@ -40,10 +40,16 @@ new_local_repository(
40
40
path = "/usr/local/cuda-12.8" ,
41
41
)
42
42
43
# CUDA toolkit for Jetson/L4T (aarch64) hosts.
# NOTE(review): points at the same local path as the x86_64 CUDA repo; the
# distinction is selected via --config/platform constraints elsewhere — confirm.
new_local_repository(
    name = "cuda_l4t",
    build_file = "@//third_party/cuda:BUILD",
    path = "/usr/local/cuda-12.8",
)
48
+
43
49
# CUDA toolkit location on Windows hosts (default NVIDIA installer path).
new_local_repository(
    name = "cuda_win",
    build_file = "@//third_party/cuda:BUILD",
    path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.8/",
)
48
54
49
55
# Nightly libtorch (cxx11 ABI, CUDA 12.8) for Linux builds.
# Merge-conflict residue and the broken duplicate URL ("libtorch///") removed;
# the resolved nightly/cu128 URL is kept.
http_archive(
    name = "libtorch",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "libtorch",
    urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
)
65
68
69
+
70
# Nightly libtorch (CUDA 12.8) for Windows builds.
http_archive(
    name = "libtorch_win",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "libtorch",
    urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-win-shared-with-deps-latest.zip"],
)
76
+
77
+
66
78
# Torch extracted from the aarch64 nightly wheel (used for the aarch64
# colocation build). Trailing merge-conflict marker removed.
# NOTE(review): URL pins a cp39 wheel and a specific dev date — confirm this is
# the intended pin rather than a placeholder.
http_archive(
    name = "torch_whl",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "torch",
    type = "zip",
    urls = ["https://download.pytorch.org/whl/nightly/cu128/torch-2.8.0.dev20250414%2Bcu128-cp39-cp39-manylinux_2_28_aarch64.whl"],
)
74
85
75
86
# Download these tarballs manually from the NVIDIA website
@@ -94,6 +105,15 @@ http_archive(
94
105
],
95
106
)
96
107
108
# TensorRT 10.3 tarball for Jetson/L4T (aarch64, CUDA 12.6).
http_archive(
    name = "tensorrt_l4t",
    build_file = "@//third_party/tensorrt/archive:BUILD",
    strip_prefix = "TensorRT-10.3.0.26",
    urls = [
        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
    ],
)
116
+
97
117
http_archive (
98
118
name = "tensorrt_win" ,
99
119
build_file = "@//third_party/tensorrt/archive:BUILD" ,
@@ -115,32 +135,12 @@ http_archive(
115
135
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
116
136
# for both versions here and do not use --config=pre-cxx11-abi
117
137
118
# new_local_repository(
#     name = "libtorch",
#     path = "/workspace/tensorrt/.venv/lib/python3.10/site-packages/torch",
#     build_file = "third_party/libtorch/BUILD"
# )
144
144
#new_local_repository(
145
145
# name = "tensorrt",
146
146
# path = "/usr/",
0 commit comments