diff --git a/test/Integration/tpp-run-print-ir.mlir b/test/Integration/tpp-run-print-ir.mlir
new file mode 100644
index 000000000..4d1bea770
--- /dev/null
+++ b/test/Integration/tpp-run-print-ir.mlir
@@ -0,0 +1,17 @@
+// RUN: tpp-run %s -e entry -entry-point-result=void -mlir-print-ir-before=bufferize 2>&1 | FileCheck %s --check-prefix=BEFORE
+// RUN: tpp-run %s -e entry -entry-point-result=void -mlir-print-ir-after=bufferize 2>&1 | FileCheck %s --check-prefix=AFTER
+
+func.func @entry(%arg0: tensor<128x512xf32>, %arg1: tensor<512x256xf32>, %arg2: tensor<128x256xf32>)
+    -> tensor<128x256xf32> {
+  %0 = linalg.matmul ins(%arg0, %arg1: tensor<128x512xf32>, tensor<512x256xf32>)
+                     outs(%arg2: tensor<128x256xf32>) -> tensor<128x256xf32>
+  return %0 : tensor<128x256xf32>
+}
+
+// BEFORE: IR Dump Before Bufferize (bufferize)
+// BEFORE-LABEL: @_entry(
+// BEFORE: linalg.batch_reduce_matmul{{.*}}tensor<
+
+// AFTER: IR Dump After Bufferize (bufferize)
+// AFTER-LABEL: @_entry(
+// AFTER: linalg.batch_reduce_matmul{{.*}}memref<
diff --git a/tools/tpp-run/tpp-run.cpp b/tools/tpp-run/tpp-run.cpp
index c5b868a26..7db7f81c2 100644
--- a/tools/tpp-run/tpp-run.cpp
+++ b/tools/tpp-run/tpp-run.cpp
@@ -158,6 +158,10 @@ static LogicalResult prepareMLIRKernel(Operation *op,
   // A set of default passes that lower any input IR to LLVM
   PassManager passManager(module.getContext());
 
+  // Propagate pass manager's command-line options.
+  if (failed(applyPassManagerCLOptions(passManager)))
+    return failure();
+
   tpp::TppRunnerWrapperOptions wrapperOpts;
   wrapperOpts.kernelName = options.mainFuncName;
   wrapperOpts.kernelType = options.mainFuncType;
@@ -275,6 +279,11 @@ int main(int argc, char **argv) {
   // Initialize GPU-related LLVM machinery
   tpp::initializeGpuTargets();
 
+  // Register all passes to expose them for debugging
+  mlir::registerAllPasses();
+  mlir::tpp::registerTppCompilerPasses();
+  mlir::tpp::registerTppPassBundlePasses();
+
   // Add the following to include *all* MLIR Core dialects, or selectively
   // include what you need like above. You only need to register dialects that
   // will be *parsed* by the tool, not the one generated
@@ -288,6 +297,10 @@ int main(int argc, char **argv) {
   mlir::linalg::registerTransformDialectExtension(registry);
   mlir::tensor::registerTransformDialectExtension(registry);
 
+  // Add pass manager CLI debug options - exposes IR printing capabilities
+  // same as in opt tool
+  mlir::registerPassManagerCLOptions();
+
   // This is how we integrate with the pipeline
   JitRunnerConfig config;
   config.mlirTransformer = prepareMLIRKernel;