From 024c48b23c0e8721d94e544fa78bcfd291afa439 Mon Sep 17 00:00:00 2001
From: Scott Todd
Date: Mon, 24 Jun 2024 09:43:10 -0700
Subject: [PATCH] Add check tests for more tensor dialect ops. (#17726)

Partial follow-up to
https://github.com/iree-org/iree/pull/17696#discussion_r1645218026.

The `concat` tests exercise `__builtin_splat_i64` incidentally (the tests
are mostly folded away if `util.optimization_barrier` is omitted). That PR
addressed a bug in `__builtin_fill_i64`. To reliably test those builtins
directly, we'll probably need to add other tests that start after stream
(see the sketch after the patch below).

Docs: https://mlir.llvm.org/docs/Dialects/TensorOps/

---------

Signed-off-by: Scott Todd
---
 tests/e2e/tensor_ops/BUILD.bazel         | 12 +++++
 tests/e2e/tensor_ops/CMakeLists.txt      | 12 +++++
 tests/e2e/tensor_ops/collapse_shape.mlir | 15 ++++++
 tests/e2e/tensor_ops/concat.mlir         | 59 ++++++++++++++++++++++++
 tests/e2e/tensor_ops/expand_shape.mlir   | 15 ++++++
 tests/e2e/tensor_ops/extract_slice.mlir  | 24 ++++++++++
 6 files changed, 137 insertions(+)
 create mode 100644 tests/e2e/tensor_ops/collapse_shape.mlir
 create mode 100644 tests/e2e/tensor_ops/concat.mlir
 create mode 100644 tests/e2e/tensor_ops/expand_shape.mlir

diff --git a/tests/e2e/tensor_ops/BUILD.bazel b/tests/e2e/tensor_ops/BUILD.bazel
index f1008b6ea05e..35500dced487 100644
--- a/tests/e2e/tensor_ops/BUILD.bazel
+++ b/tests/e2e/tensor_ops/BUILD.bazel
@@ -17,6 +17,9 @@ iree_check_single_backend_test_suite(
     srcs = enforce_glob(
         # keep sorted
         [
+            "collapse_shape.mlir",
+            "concat.mlir",
+            "expand_shape.mlir",
             "extract_slice.mlir",
             "pack.mlir",
             "pack_dynamic_inner_tiles.mlir",
@@ -37,6 +40,9 @@ iree_check_single_backend_test_suite(
     srcs = enforce_glob(
         # keep sorted
         [
+            "collapse_shape.mlir",
+            "concat.mlir",
+            "expand_shape.mlir",
             "extract_slice.mlir",
             "pack.mlir",
             "pack_dynamic_inner_tiles.mlir",
@@ -78,6 +84,9 @@ iree_check_single_backend_test_suite(
     srcs = enforce_glob(
         # keep sorted
         [
+            "collapse_shape.mlir",
+            "concat.mlir",
+            "expand_shape.mlir",
             "extract_slice.mlir",
             "pack.mlir",
             "tensor_insert_slice.mlir",
@@ -106,6 +115,9 @@ iree_check_single_backend_test_suite(
     srcs = enforce_glob(
         # keep sorted
         [
+            "collapse_shape.mlir",
+            "concat.mlir",
+            "expand_shape.mlir",
             "extract_slice.mlir",
             "tensor_cast.mlir",
             "tensor_insert_slice.mlir",
diff --git a/tests/e2e/tensor_ops/CMakeLists.txt b/tests/e2e/tensor_ops/CMakeLists.txt
index 52b7932363ce..c3c0a5115346 100644
--- a/tests/e2e/tensor_ops/CMakeLists.txt
+++ b/tests/e2e/tensor_ops/CMakeLists.txt
@@ -14,6 +14,9 @@ iree_check_single_backend_test_suite(
   NAME
     check_llvm-cpu_local-task
   SRCS
+    "collapse_shape.mlir"
+    "concat.mlir"
+    "expand_shape.mlir"
     "extract_slice.mlir"
     "pack.mlir"
     "pack_dynamic_inner_tiles.mlir"
@@ -31,6 +34,9 @@ iree_check_single_backend_test_suite(
   NAME
     check_vmvx_local-task
   SRCS
+    "collapse_shape.mlir"
+    "concat.mlir"
+    "expand_shape.mlir"
     "extract_slice.mlir"
     "pack.mlir"
     "pack_dynamic_inner_tiles.mlir"
@@ -64,6 +70,9 @@ iree_check_single_backend_test_suite(
   NAME
     check_cuda
   SRCS
+    "collapse_shape.mlir"
+    "concat.mlir"
+    "expand_shape.mlir"
     "extract_slice.mlir"
     "pack.mlir"
     "tensor_insert_slice.mlir"
@@ -83,6 +92,9 @@ iree_check_single_backend_test_suite(
   NAME
     check_vulkan-spirv_vulkan
   SRCS
+    "collapse_shape.mlir"
+    "concat.mlir"
+    "expand_shape.mlir"
     "extract_slice.mlir"
     "tensor_cast.mlir"
     "tensor_insert_slice.mlir"
diff --git a/tests/e2e/tensor_ops/collapse_shape.mlir b/tests/e2e/tensor_ops/collapse_shape.mlir
new file mode 100644
index 000000000000..2944ce7669f7
--- /dev/null
+++ b/tests/e2e/tensor_ops/collapse_shape.mlir
@@ -0,0 +1,15 @@
+func.func @collapse_shape_i32() {
+  %1 = arith.constant dense<[[1, 2, 3, 4]]> : tensor<1x4xi32>
+  %2 = util.optimization_barrier %1 : tensor<1x4xi32>
+  %collapsed = tensor.collapse_shape %2 [[0, 1]] : tensor<1x4xi32> into tensor<4xi32>
+  check.expect_eq_const(%collapsed, dense<[1,2,3,4]> : tensor<4xi32>) : tensor<4xi32>
+  return
+}
+
+func.func @collapse_shape_i64() {
+  %1 = arith.constant dense<[[1, 2, 3, 4]]> : tensor<1x4xi64>
+  %2 = util.optimization_barrier %1 : tensor<1x4xi64>
+  %collapsed = tensor.collapse_shape %2 [[0, 1]] : tensor<1x4xi64> into tensor<4xi64>
+  check.expect_eq_const(%collapsed, dense<[1,2,3,4]> : tensor<4xi64>) : tensor<4xi64>
+  return
+}
diff --git a/tests/e2e/tensor_ops/concat.mlir b/tests/e2e/tensor_ops/concat.mlir
new file mode 100644
index 000000000000..93680eda74bc
--- /dev/null
+++ b/tests/e2e/tensor_ops/concat.mlir
@@ -0,0 +1,59 @@
+func.func @concat_i8_static_dim0() {
+  %cst_1 = arith.constant dense<1> : tensor<1xi8>
+  %cst_2 = arith.constant dense<2> : tensor<1xi8>
+  %1 = util.optimization_barrier %cst_1 : tensor<1xi8>
+  %2 = util.optimization_barrier %cst_2 : tensor<1xi8>
+  %concat = tensor.concat dim(0) %1, %2 : (tensor<1xi8>, tensor<1xi8>) -> tensor<2xi8>
+  check.expect_eq_const(%concat, dense<[1,2]> : tensor<2xi8>) : tensor<2xi8>
+  return
+}
+
+func.func @concat_i16_static_dim0() {
+  %cst_1 = arith.constant dense<1> : tensor<1xi16>
+  %cst_2 = arith.constant dense<2> : tensor<1xi16>
+  %1 = util.optimization_barrier %cst_1 : tensor<1xi16>
+  %2 = util.optimization_barrier %cst_2 : tensor<1xi16>
+  %concat = tensor.concat dim(0) %1, %2 : (tensor<1xi16>, tensor<1xi16>) -> tensor<2xi16>
+  check.expect_eq_const(%concat, dense<[1,2]> : tensor<2xi16>) : tensor<2xi16>
+  return
+}
+
+func.func @concat_i32_static_dim0() {
+  %cst_1 = arith.constant dense<1> : tensor<1xi32>
+  %cst_2 = arith.constant dense<2> : tensor<1xi32>
+  %1 = util.optimization_barrier %cst_1 : tensor<1xi32>
+  %2 = util.optimization_barrier %cst_2 : tensor<1xi32>
+  %concat = tensor.concat dim(0) %1, %2 : (tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
+  check.expect_eq_const(%concat, dense<[1,2]> : tensor<2xi32>) : tensor<2xi32>
+  return
+}
+
+func.func @concat_i64_static_dim0() {
+  %cst_1 = arith.constant dense<1> : tensor<1xi64>
+  %cst_2 = arith.constant dense<2> : tensor<1xi64>
+  %1 = util.optimization_barrier %cst_1 : tensor<1xi64>
+  %2 = util.optimization_barrier %cst_2 : tensor<1xi64>
+  %concat = tensor.concat dim(0) %1, %2 : (tensor<1xi64>, tensor<1xi64>) -> tensor<2xi64>
+  check.expect_eq_const(%concat, dense<[1,2]> : tensor<2xi64>) : tensor<2xi64>
+  return
+}
+
+func.func @concat_f32_static_dim0() {
+  %cst_1 = arith.constant dense<1.0> : tensor<1xf32>
+  %cst_2 = arith.constant dense<2.0> : tensor<1xf32>
+  %1 = util.optimization_barrier %cst_1 : tensor<1xf32>
+  %2 = util.optimization_barrier %cst_2 : tensor<1xf32>
+  %concat = tensor.concat dim(0) %1, %2 : (tensor<1xf32>, tensor<1xf32>) -> tensor<2xf32>
+  check.expect_almost_eq_const(%concat, dense<[1.0,2.0]> : tensor<2xf32>) : tensor<2xf32>
+  return
+}
+
+func.func @concat_i32_dim1() {
+  %lhs = arith.constant dense<[[1,2,3],[-1,-2,-3]]> : tensor<2x3xi32>
+  %rhs = arith.constant dense<[[4,5,6,7,8],[-4,-5,-6,-7,-8]]> : tensor<2x5xi32>
+  %lhs_barrier = util.optimization_barrier %lhs : tensor<2x3xi32>
+  %rhs_barrier = util.optimization_barrier %rhs : tensor<2x5xi32>
+  %concat = tensor.concat dim(1) %lhs_barrier, %rhs_barrier : (tensor<2x3xi32>, tensor<2x5xi32>) -> tensor<2x8xi32>
+  check.expect_eq_const(%concat, dense<[[1,2,3,4,5,6,7,8],[-1,-2,-3,-4,-5,-6,-7,-8]]> : tensor<2x8xi32>) : tensor<2x8xi32>
+  return
+}
diff --git a/tests/e2e/tensor_ops/expand_shape.mlir b/tests/e2e/tensor_ops/expand_shape.mlir
new file mode 100644
index 000000000000..10f405effd6e
--- /dev/null
+++ b/tests/e2e/tensor_ops/expand_shape.mlir
@@ -0,0 +1,15 @@
+func.func @expand_shape_i32() {
+  %1 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
+  %2 = util.optimization_barrier %1 : tensor<4xi32>
+  %expanded = tensor.expand_shape %2 [[0, 1]] output_shape [1, 4] : tensor<4xi32> into tensor<1x4xi32>
+  check.expect_eq_const(%expanded, dense<[[1,2,3,4]]> : tensor<1x4xi32>) : tensor<1x4xi32>
+  return
+}
+
+func.func @expand_shape_i64() {
+  %1 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi64>
+  %2 = util.optimization_barrier %1 : tensor<4xi64>
+  %expanded = tensor.expand_shape %2 [[0, 1]] output_shape [1, 4] : tensor<4xi64> into tensor<1x4xi64>
+  check.expect_eq_const(%expanded, dense<[[1,2,3,4]]> : tensor<1x4xi64>) : tensor<1x4xi64>
+  return
+}
diff --git a/tests/e2e/tensor_ops/extract_slice.mlir b/tests/e2e/tensor_ops/extract_slice.mlir
index 6ec9ee4d462e..a59d5081fec5 100644
--- a/tests/e2e/tensor_ops/extract_slice.mlir
+++ b/tests/e2e/tensor_ops/extract_slice.mlir
@@ -1,3 +1,27 @@
+func.func public @extract_slice_i32_offset1_size2_stride1() {
+  %1 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
+  %2 = util.optimization_barrier %1 : tensor<4xi32>
+  %extracted_slice = tensor.extract_slice %2[1] [2] [1] : tensor<4xi32> to tensor<2xi32>
+  check.expect_eq_const(%extracted_slice, dense<[2, 3]> : tensor<2xi32>) : tensor<2xi32>
+  return
+}
+
+func.func public @extract_slice_i64_offset1_size2_stride1() {
+  %1 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi64>
+  %2 = util.optimization_barrier %1 : tensor<4xi64>
+  %extracted_slice = tensor.extract_slice %2[1] [2] [1] : tensor<4xi64> to tensor<2xi64>
+  check.expect_eq_const(%extracted_slice, dense<[2, 3]> : tensor<2xi64>) : tensor<2xi64>
+  return
+}
+
+func.func public @extract_slice_i32_offset1_size2_stride2() {
+  %1 = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
+  %2 = util.optimization_barrier %1 : tensor<4xi32>
+  %extracted_slice = tensor.extract_slice %2[1] [2] [2] : tensor<4xi32> to tensor<2xi32>
+  check.expect_eq_const(%extracted_slice, dense<[2, 4]> : tensor<2xi32>) : tensor<2xi32>
+  return
+}
+
 func.func @extract_slice_strided() {
   %0 = tensor.empty() : tensor<500x750xi32>
   %1 = linalg.generic {
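
Note (not part of the patch): a minimal sketch of a check test that would target splat behavior directly, following the same `util.optimization_barrier` / `check.expect_eq_const` pattern as the tests above. The function name, shape, and value are illustrative assumptions, and whether an i64 `tensor.splat` actually lowers through `__builtin_splat_i64` depends on the backend's i64 emulation.

func.func @splat_i64() {
  // Hypothetical test, not added by this patch: splat a scalar that the
  // compiler cannot constant-fold because of the optimization barrier.
  %cst = arith.constant 123 : i64
  %value = util.optimization_barrier %cst : i64
  %splat = tensor.splat %value : tensor<4xi64>
  check.expect_eq_const(%splat, dense<123> : tensor<4xi64>) : tensor<4xi64>
  return
}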