diff --git a/test/Dialect/TorchConversion/torch-backend-to-linalg-on-tensors-no-contract-check.mlir b/test/Dialect/TorchConversion/torch-backend-to-linalg-on-tensors-no-contract-check.mlir new file mode 100644 index 000000000000..33fbfcb90c66 --- /dev/null +++ b/test/Dialect/TorchConversion/torch-backend-to-linalg-on-tensors-no-contract-check.mlir @@ -0,0 +1,24 @@ +// RUN: torch-mlir-opt -p 'builtin.module(torch-backend-to-linalg-on-tensors-backend-pipeline{verify=0})' -split-input-file %s | FileCheck %s + +// CHECK: func.func @tosa +func.func @tosa(%arg0: tensor<?x?xf32>) -> tensor<?x?xf32> { + // CHECK: tosa.abs + %1 = tosa.abs %arg0 : (tensor<?x?xf32>) -> tensor<?x?xf32> + return %1 : tensor<?x?xf32> +} + +// ----- + +// CHECK: func.func @torch_gemm +func.func @torch_gemm(%arg0: tensor<?x3xf32>, %arg1: tensor<3x?xf32>, %arg2: tensor<?x?xf32>) -> (tensor<?x?xf32> {onnx.name = "gemm"}) attributes {torch.onnx_meta.opset_version = 19 : si64} { + %0 = torch_c.from_builtin_tensor %arg0 : tensor<?x3xf32> -> !torch.vtensor<[?,3],f32> + %1 = torch_c.from_builtin_tensor %arg1 : tensor<3x?xf32> -> !torch.vtensor<[3,?],f32> + %2 = torch_c.from_builtin_tensor %arg2 : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32> + %int0 = torch.constant.int 0 + %int1 = torch.constant.int 1 + %3 = torch.aten.mm %0, %1 : !torch.vtensor<[?,3],f32>, !torch.vtensor<[3,?],f32> -> !torch.vtensor<[?,?],f32> + %4 = torch.aten.add.Tensor %3, %2, %int1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32>, !torch.int -> !torch.vtensor<[?,?],f32> + %5 = torch_c.to_builtin_tensor %4 : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32> + %6 = tosa.abs %5 : (tensor<?x?xf32>) -> tensor<?x?xf32> + return %6 : tensor<?x?xf32> +} diff --git a/test/Dialect/TorchConversion/torch-backend-to-linalg-on-tensors-no-mlprogram.mlir b/test/Dialect/TorchConversion/torch-backend-to-linalg-on-tensors-no-mlprogram.mlir new file mode 100644 index 000000000000..52280ecdfa0f --- /dev/null +++ b/test/Dialect/TorchConversion/torch-backend-to-linalg-on-tensors-no-mlprogram.mlir @@ -0,0 +1,17 @@ +// RUN: torch-mlir-opt -p 
'builtin.module(torch-backend-to-linalg-on-tensors-backend-pipeline{use-mlprogram=0})' -split-input-file %s | FileCheck %s + // RUN: torch-mlir-opt -p 'builtin.module(torch-backend-to-linalg-on-tensors-backend-pipeline{use-mlprogram=1})' -split-input-file %s | FileCheck --check-prefix=YES-CHECK %s + +// CHECK-NOT: ml_program.global{{.*}}@global_seed +// YES-CHECK: ml_program.global{{.*}}@global_seed +// CHECK: func.func @torch_gemm +func.func @torch_gemm(%arg0: tensor<?x3xf32>, %arg1: tensor<3x?xf32>, %arg2: tensor<?x?xf32>) -> (tensor<?x?xf32> {onnx.name = "gemm"}) attributes {torch.onnx_meta.opset_version = 19 : si64} { + %0 = torch_c.from_builtin_tensor %arg0 : tensor<?x3xf32> -> !torch.vtensor<[?,3],f32> + %1 = torch_c.from_builtin_tensor %arg1 : tensor<3x?xf32> -> !torch.vtensor<[3,?],f32> + %2 = torch_c.from_builtin_tensor %arg2 : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32> + %int0 = torch.constant.int 0 + %int1 = torch.constant.int 1 + %3 = torch.aten.mm %0, %1 : !torch.vtensor<[?,3],f32>, !torch.vtensor<[3,?],f32> -> !torch.vtensor<[?,?],f32> + %4 = torch.aten.add.Tensor %3, %2, %int1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],f32>, !torch.int -> !torch.vtensor<[?,?],f32> + %5 = torch_c.to_builtin_tensor %4 : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32> + return %5 : tensor<?x?xf32> +}