Skip to content

Commit

Permalink
Add check tests for more tensor dialect ops. (iree-org#17726)
Browse files Browse the repository at this point in the history
Partial follow-up to
iree-org#17696 (comment). The
`concat` tests exercise `__builtin_splat_i64` (incidentally, and the
tests are mostly folded away if `util.optimization_barrier` is omitted).
That PR addressed a bug in `__builtin_fill_i64`. To reliably test those
builtins directly, we'll probably need to add other tests that start
after the `stream` dialect level of the compilation pipeline.

Docs: https://mlir.llvm.org/docs/Dialects/TensorOps/

---------

Signed-off-by: Scott Todd <[email protected]>
  • Loading branch information
ScottTodd authored Jun 24, 2024
1 parent 9eb62c4 commit 024c48b
Show file tree
Hide file tree
Showing 6 changed files with 137 additions and 0 deletions.
12 changes: 12 additions & 0 deletions tests/e2e/tensor_ops/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@ iree_check_single_backend_test_suite(
srcs = enforce_glob(
# keep sorted
[
"collapse_shape.mlir",
"concat.mlir",
"expand_shape.mlir",
"extract_slice.mlir",
"pack.mlir",
"pack_dynamic_inner_tiles.mlir",
Expand All @@ -37,6 +40,9 @@ iree_check_single_backend_test_suite(
srcs = enforce_glob(
# keep sorted
[
"collapse_shape.mlir",
"concat.mlir",
"expand_shape.mlir",
"extract_slice.mlir",
"pack.mlir",
"pack_dynamic_inner_tiles.mlir",
Expand Down Expand Up @@ -78,6 +84,9 @@ iree_check_single_backend_test_suite(
srcs = enforce_glob(
# keep sorted
[
"collapse_shape.mlir",
"concat.mlir",
"expand_shape.mlir",
"extract_slice.mlir",
"pack.mlir",
"tensor_insert_slice.mlir",
Expand Down Expand Up @@ -106,6 +115,9 @@ iree_check_single_backend_test_suite(
srcs = enforce_glob(
# keep sorted
[
"collapse_shape.mlir",
"concat.mlir",
"expand_shape.mlir",
"extract_slice.mlir",
"tensor_cast.mlir",
"tensor_insert_slice.mlir",
Expand Down
12 changes: 12 additions & 0 deletions tests/e2e/tensor_ops/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@ iree_check_single_backend_test_suite(
NAME
check_llvm-cpu_local-task
SRCS
"collapse_shape.mlir"
"concat.mlir"
"expand_shape.mlir"
"extract_slice.mlir"
"pack.mlir"
"pack_dynamic_inner_tiles.mlir"
Expand All @@ -31,6 +34,9 @@ iree_check_single_backend_test_suite(
NAME
check_vmvx_local-task
SRCS
"collapse_shape.mlir"
"concat.mlir"
"expand_shape.mlir"
"extract_slice.mlir"
"pack.mlir"
"pack_dynamic_inner_tiles.mlir"
Expand Down Expand Up @@ -64,6 +70,9 @@ iree_check_single_backend_test_suite(
NAME
check_cuda
SRCS
"collapse_shape.mlir"
"concat.mlir"
"expand_shape.mlir"
"extract_slice.mlir"
"pack.mlir"
"tensor_insert_slice.mlir"
Expand All @@ -83,6 +92,9 @@ iree_check_single_backend_test_suite(
NAME
check_vulkan-spirv_vulkan
SRCS
"collapse_shape.mlir"
"concat.mlir"
"expand_shape.mlir"
"extract_slice.mlir"
"tensor_cast.mlir"
"tensor_insert_slice.mlir"
Expand Down
15 changes: 15 additions & 0 deletions tests/e2e/tensor_ops/collapse_shape.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
// Collapses a 1x4 i32 tensor into a flat 4-element tensor and checks the
// values are preserved. The barrier keeps the constant from folding through
// the reshape at compile time.
func.func @collapse_shape_i32() {
  %cst = arith.constant dense<[[1, 2, 3, 4]]> : tensor<1x4xi32>
  %src = util.optimization_barrier %cst : tensor<1x4xi32>
  %flat = tensor.collapse_shape %src [[0, 1]] : tensor<1x4xi32> into tensor<4xi32>
  check.expect_eq_const(%flat, dense<[1,2,3,4]> : tensor<4xi32>) : tensor<4xi32>
  return
}

// Same as @collapse_shape_i32 but with i64 elements (wide-integer path).
func.func @collapse_shape_i64() {
  %cst = arith.constant dense<[[1, 2, 3, 4]]> : tensor<1x4xi64>
  %src = util.optimization_barrier %cst : tensor<1x4xi64>
  %flat = tensor.collapse_shape %src [[0, 1]] : tensor<1x4xi64> into tensor<4xi64>
  check.expect_eq_const(%flat, dense<[1,2,3,4]> : tensor<4xi64>) : tensor<4xi64>
  return
}
59 changes: 59 additions & 0 deletions tests/e2e/tensor_ops/concat.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
// Concatenates two single-element i8 tensors along dim 0. Barriers prevent
// the concat from being constant-folded away.
func.func @concat_i8_static_dim0() {
  %lhs_cst = arith.constant dense<1> : tensor<1xi8>
  %rhs_cst = arith.constant dense<2> : tensor<1xi8>
  %lhs = util.optimization_barrier %lhs_cst : tensor<1xi8>
  %rhs = util.optimization_barrier %rhs_cst : tensor<1xi8>
  %result = tensor.concat dim(0) %lhs, %rhs : (tensor<1xi8>, tensor<1xi8>) -> tensor<2xi8>
  check.expect_eq_const(%result, dense<[1,2]> : tensor<2xi8>) : tensor<2xi8>
  return
}

// dim-0 concat of two single-element tensors, i16 variant.
func.func @concat_i16_static_dim0() {
  %lhs_cst = arith.constant dense<1> : tensor<1xi16>
  %rhs_cst = arith.constant dense<2> : tensor<1xi16>
  %lhs = util.optimization_barrier %lhs_cst : tensor<1xi16>
  %rhs = util.optimization_barrier %rhs_cst : tensor<1xi16>
  %result = tensor.concat dim(0) %lhs, %rhs : (tensor<1xi16>, tensor<1xi16>) -> tensor<2xi16>
  check.expect_eq_const(%result, dense<[1,2]> : tensor<2xi16>) : tensor<2xi16>
  return
}

// dim-0 concat of two single-element tensors, i32 variant.
func.func @concat_i32_static_dim0() {
  %lhs_cst = arith.constant dense<1> : tensor<1xi32>
  %rhs_cst = arith.constant dense<2> : tensor<1xi32>
  %lhs = util.optimization_barrier %lhs_cst : tensor<1xi32>
  %rhs = util.optimization_barrier %rhs_cst : tensor<1xi32>
  %result = tensor.concat dim(0) %lhs, %rhs : (tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
  check.expect_eq_const(%result, dense<[1,2]> : tensor<2xi32>) : tensor<2xi32>
  return
}

// dim-0 concat of two single-element tensors, i64 variant (exercises the
// wide-integer element path).
func.func @concat_i64_static_dim0() {
  %lhs_cst = arith.constant dense<1> : tensor<1xi64>
  %rhs_cst = arith.constant dense<2> : tensor<1xi64>
  %lhs = util.optimization_barrier %lhs_cst : tensor<1xi64>
  %rhs = util.optimization_barrier %rhs_cst : tensor<1xi64>
  %result = tensor.concat dim(0) %lhs, %rhs : (tensor<1xi64>, tensor<1xi64>) -> tensor<2xi64>
  check.expect_eq_const(%result, dense<[1,2]> : tensor<2xi64>) : tensor<2xi64>
  return
}

// dim-0 concat, f32 variant. Uses the approximate-equality check since the
// results are floating point.
func.func @concat_f32_static_dim0() {
  %lhs_cst = arith.constant dense<1.0> : tensor<1xf32>
  %rhs_cst = arith.constant dense<2.0> : tensor<1xf32>
  %lhs = util.optimization_barrier %lhs_cst : tensor<1xf32>
  %rhs = util.optimization_barrier %rhs_cst : tensor<1xf32>
  %result = tensor.concat dim(0) %lhs, %rhs : (tensor<1xf32>, tensor<1xf32>) -> tensor<2xf32>
  check.expect_almost_eq_const(%result, dense<[1.0,2.0]> : tensor<2xf32>) : tensor<2xf32>
  return
}

// Concatenates 2x3 and 2x5 tensors along the inner (non-leading) dimension,
// producing a 2x8 result; checks values from both operands interleave
// correctly row by row.
func.func @concat_i32_dim1() {
  %left_cst = arith.constant dense<[[1,2,3],[-1,-2,-3]]> : tensor<2x3xi32>
  %right_cst = arith.constant dense<[[4,5,6,7,8],[-4,-5,-6,-7,-8]]> : tensor<2x5xi32>
  %left = util.optimization_barrier %left_cst : tensor<2x3xi32>
  %right = util.optimization_barrier %right_cst : tensor<2x5xi32>
  %result = tensor.concat dim(1) %left, %right : (tensor<2x3xi32>, tensor<2x5xi32>) -> tensor<2x8xi32>
  check.expect_eq_const(%result, dense<[[1,2,3,4,5,6,7,8],[-1,-2,-3,-4,-5,-6,-7,-8]]> : tensor<2x8xi32>) : tensor<2x8xi32>
  return
}
15 changes: 15 additions & 0 deletions tests/e2e/tensor_ops/expand_shape.mlir
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
// Expands a flat 4-element i32 tensor into 1x4 and checks the values are
// preserved through the reassociation.
func.func @expand_shape_i32() {
  %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
  %src = util.optimization_barrier %cst : tensor<4xi32>
  %reshaped = tensor.expand_shape %src [[0, 1]] output_shape [1, 4] : tensor<4xi32> into tensor<1x4xi32>
  check.expect_eq_const(%reshaped, dense<[[1,2,3,4]]> : tensor<1x4xi32>) : tensor<1x4xi32>
  return
}

// Same as @expand_shape_i32 but with i64 elements (wide-integer path).
func.func @expand_shape_i64() {
  %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi64>
  %src = util.optimization_barrier %cst : tensor<4xi64>
  %reshaped = tensor.expand_shape %src [[0, 1]] output_shape [1, 4] : tensor<4xi64> into tensor<1x4xi64>
  check.expect_eq_const(%reshaped, dense<[[1,2,3,4]]> : tensor<1x4xi64>) : tensor<1x4xi64>
  return
}
24 changes: 24 additions & 0 deletions tests/e2e/tensor_ops/extract_slice.mlir
Original file line number Diff line number Diff line change
@@ -1,3 +1,27 @@
// Extracts a contiguous slice (offset 1, size 2, unit stride) from a
// 4-element i32 tensor: [1,2,3,4] -> [2,3].
func.func public @extract_slice_i32_offset1_size2_stride1() {
  %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
  %src = util.optimization_barrier %cst : tensor<4xi32>
  %slice = tensor.extract_slice %src[1] [2] [1] : tensor<4xi32> to tensor<2xi32>
  check.expect_eq_const(%slice, dense<[2, 3]> : tensor<2xi32>) : tensor<2xi32>
  return
}

// Same slice as the i32 test above but with i64 elements: [1,2,3,4] -> [2,3].
func.func public @extract_slice_i64_offset1_size2_stride1() {
  %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi64>
  %src = util.optimization_barrier %cst : tensor<4xi64>
  %slice = tensor.extract_slice %src[1] [2] [1] : tensor<4xi64> to tensor<2xi64>
  check.expect_eq_const(%slice, dense<[2, 3]> : tensor<2xi64>) : tensor<2xi64>
  return
}

// Strided (non-contiguous) slice: offset 1, size 2, stride 2 over
// [1,2,3,4] selects elements at indices 1 and 3 -> [2,4].
func.func public @extract_slice_i32_offset1_size2_stride2() {
  %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
  %src = util.optimization_barrier %cst : tensor<4xi32>
  %slice = tensor.extract_slice %src[1] [2] [2] : tensor<4xi32> to tensor<2xi32>
  check.expect_eq_const(%slice, dense<[2, 4]> : tensor<2xi32>) : tensor<2xi32>
  return
}

func.func @extract_slice_strided() {
%0 = tensor.empty() : tensor<500x750xi32>
%1 = linalg.generic {
Expand Down

0 comments on commit 024c48b

Please sign in to comment.