Commit f968d8e
[tensorrt] NFC: Fix broken translation test
GitOrigin-RevId: cce60f66cb978ba012c074aca0d652710262457a
Parent: 3c67a20

3 files changed: 27 additions, 24 deletions

mlir-tensorrt/integrations/python/setup_utils.py

Lines changed: 1 addition & 0 deletions
@@ -127,6 +127,7 @@ def run_cmake_build(python_package_name: str, python_wheel_staging_dir: Path):
     cmake_options.append(
         f'-DMLIR_TRT_ENABLE_CUBLAS={os.environ["MLIR_TRT_ENABLE_CUBLAS"]}'
     )
+
     # Override TensorRT version if specified
     cmake_options.append(f"-DMLIR_TRT_DOWNLOAD_TENSORRT_VERSION={TENSORRT_VERSION}")
Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+// RUN: %pick-one-gpu tensorrt-opt -split-input-file -pass-pipeline="builtin.module(translate-tensorrt-to-engine)" \
+// RUN:   -mlir-elide-elementsattrs-if-larger=32 -tensorrt-builder-opt-level=0 %s | FileCheck %s
+
+// CHECK-LABEL: func.func @trt_host_input
+//  CHECK-SAME: tensorrt.engine
+func.func @trt_host_input(
+    %arg0: tensor<?x4xf32> {tensorrt.dimension_names = {}, tensorrt.shape_profile = #tensorrt.shape_profile<min = [2, 4], opt = [4, 4], max = [6, 4]>},
+    %arg1: tensor<i32> {tensorrt.host_tensor, tensorrt.value_bounds = #tensorrt.shape_profile<min = [1], opt = [2], max = [3]>})
+    -> tensor<?x?xf32> {
+  %cst_i32 = tensorrt.constant dense<1> : tensor<i32>
+  %0 = tensorrt.element_wise <kSUM>(%arg0, %arg0 : tensor<?x4xf32>, tensor<?x4xf32>) -> tensor<?x4xf32>
+  %1 = tensorrt.shape %0 : tensor<?x4xf32> -> tensor<2xi32>
+  %2 = tensorrt.slice %1[0][1][1] : tensor<2xi32> to tensor<1xi32>
+  %3 = tensorrt.shuffle {first_transpose = array<i64: 0>, reshape = array<i64>, second_transpose = array<i64>, zero_is_placeholder = false} ins(%2 : tensor<1xi32>) -> tensor<i32>
+  %4 = tensorrt.element_wise <kPROD>(%3, %cst_i32 : tensor<i32>, tensor<i32>) -> tensor<i32>
+  %5 = tensorrt.slice %1[1][1][1] : tensor<2xi32> to tensor<1xi32>
+  %6 = tensorrt.shuffle {first_transpose = array<i64: 0>, reshape = array<i64>, second_transpose = array<i64>, zero_is_placeholder = false} ins(%5 : tensor<1xi32>) -> tensor<i32>
+  %7 = tensorrt.element_wise <kPROD>(%4, %6 : tensor<i32>, tensor<i32>) -> tensor<i32>
+  %8 = tensorrt.element_wise <kPROD>(%arg1, %cst_i32 : tensor<i32>, tensor<i32>) -> tensor<i32>
+  %9 = tensorrt.element_wise <kFLOOR_DIV>(%7, %8 : tensor<i32>, tensor<i32>) -> tensor<i32>
+  %10 = tensorrt.shuffle {first_transpose = array<i64>, reshape = array<i64: 1>, second_transpose = array<i64: 0>, zero_is_placeholder = false} ins(%9 : tensor<i32>) -> tensor<?xi32>
+  %11 = tensorrt.shuffle {first_transpose = array<i64>, reshape = array<i64: 1>, second_transpose = array<i64: 0>, zero_is_placeholder = false} ins(%arg1 : tensor<i32>) -> tensor<?xi32>
+  %12 = tensorrt.concatenation {axis = 0 : i32} ins(%10, %11 : tensor<?xi32>, tensor<?xi32>) -> tensor<2xi32>
+  %13 = tensorrt.shuffle {first_transpose = array<i64: 0, 1>, second_transpose = array<i64: 0, 1>, zero_is_placeholder = false} ins(%0, %12 : tensor<?x4xf32>, tensor<2xi32>) -> tensor<?x?xf32>
+  return %13 : tensor<?x?xf32>
+}
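For readers unfamiliar with lit-style tests: the two `// RUN:` lines form a single command that lit assembles after substituting `%s` with the test file's path and expanding `%pick-one-gpu`, and FileCheck then verifies the `CHECK` directives against the command's output. Below is a rough Python equivalent of that pipeline; the file path is a placeholder, `%pick-one-gpu` is treated as a no-op because its definition is not shown here, and in the repository this is normally driven by lit rather than a hand-written script.

```python
# Sketch of what the RUN lines above do once lit expands them.
import subprocess

TEST_FILE = "trt-host-input-test.mlir"  # hypothetical path to the new test file

# Stage 1 of the RUN line: run the translate-tensorrt-to-engine pipeline.
opt = subprocess.run(
    [
        "tensorrt-opt",
        "-split-input-file",
        "-pass-pipeline=builtin.module(translate-tensorrt-to-engine)",
        "-mlir-elide-elementsattrs-if-larger=32",
        "-tensorrt-builder-opt-level=0",
        TEST_FILE,
    ],
    capture_output=True,
    text=True,
    check=True,
)

# Stage 2: FileCheck matches the CHECK-LABEL / CHECK-SAME directives in the
# test file against tensorrt-opt's output (the translated function should
# carry a tensorrt.engine attribute).
subprocess.run(["FileCheck", TEST_FILE], input=opt.stdout, text=True, check=True)
```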

mlir-tensorrt/tensorrt/test/Target/TensorRT/translate-to-tensorrt.mlir

Lines changed: 0 additions & 24 deletions
@@ -65,27 +65,3 @@ func.func @trt_dim_names(
   %0 = tensorrt.identity %arg0 : tensor<?x?xf32> to tensor<?x?xf32>
   return %0 : tensor<?x?xf32>
 }
-
-// CHECK-LABEL: @trt_host_input
-//  CHECK-SAME: tensorrt.engine
-func.func @trt_host_input(%arg0: tensor<?x4xf32> {tensorrt.dimension_names = {}, tensorrt.shape_profile = #tensorrt.shape_profile<min = [2, 4], opt = [4, 4], max = [6, 4]>}, %arg1: tensor<i32> {plan.memory_space = #plan.memory_space<host>, tensorrt.value_bounds = #tensorrt.shape_profile<min = [1], opt = [2], max = [3]>}) -> tensor<?x?xf32> {
-  %0 = tensorrt.element_wise <kSUM>(%arg0, %arg0 : tensor<?x4xf32>, tensor<?x4xf32>) -> tensor<?x4xf32>
-  %1 = tensorrt.shape %0 : tensor<?x4xf32> -> tensor<2xi32>
-  %2 = tensorrt.slice %1[0][1][1] : tensor<2xi32> to tensor<1xi32>
-  %3 = tensorrt.collapse_rank %2 : tensor<1xi32> to tensor<i32>
-  %cst_i32 = tensorrt.constant dense<1> : tensor<i32>
-  %4 = tensorrt.element_wise <kPROD>(%3, %cst_i32 : tensor<i32>, tensor<i32>) -> tensor<i32>
-  %5 = tensorrt.slice %1[1][1][1] : tensor<2xi32> to tensor<1xi32>
-  %6 = tensorrt.collapse_rank %5 : tensor<1xi32> to tensor<i32>
-  %7 = tensorrt.element_wise <kPROD>(%4, %6 : tensor<i32>, tensor<i32>) -> tensor<i32>
-  %cst_i32_0 = tensorrt.constant dense<1> : tensor<i32>
-  %8 = tensorrt.element_wise <kPROD>(%arg1, %cst_i32_0 : tensor<i32>, tensor<i32>) -> tensor<i32>
-  %9 = tensorrt.element_wise <kFLOOR_DIV>(%7, %8 : tensor<i32>, tensor<i32>) -> tensor<i32>
-  %cst_i32_1 = tensorrt.constant dense<1> : tensor<1xi32>
-  %10 = tensorrt.reshape %9 shape(%cst_i32_1: tensor<1xi32>) : tensor<i32> to tensor<?xi32>
-  %cst_i32_2 = tensorrt.constant dense<1> : tensor<1xi32>
-  %11 = tensorrt.reshape %arg1 shape(%cst_i32_2: tensor<1xi32>) : tensor<i32> to tensor<?xi32>
-  %12 = tensorrt.concatenation {axis = 0 : i32} ins(%10, %11 : tensor<?xi32>, tensor<?xi32>) -> tensor<2xi32>
-  %13 = tensorrt.reshape %0 shape(%12: tensor<2xi32>) : tensor<?x4xf32> to tensor<?x?xf32>
-  return %13 : tensor<?x?xf32>
-}
