[mlir][Linalg] Drop spurious error message
Drop usage of `emitRemark` and use `notifyMatchFailure` instead to
avoid unnecessary spew during compilation.

Differential Revision: https://reviews.llvm.org/D99485
MaheshRavishankar authored and MatteCarra committed Mar 30, 2021
1 parent 24f8bb2 commit 125d50c
Showing 2 changed files with 20 additions and 8 deletions.
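Note: `op.emitRemark(...)` prints a remark diagnostic on every compilation in which the pattern fails to apply, whereas `rewriter.notifyMatchFailure(...)` records the failure reason on the rewriter and only surfaces it in pattern-application debug output. As a rough illustration of the idiom this commit adopts (a hypothetical sketch, not code from this commit; the pattern name, the precondition, and the include paths are assumptions), a pattern written this way looks like:

    // Hypothetical example; names and include paths are illustrative only.
    #include "mlir/Dialect/Linalg/IR/LinalgOps.h" // path may differ across MLIR versions
    #include "mlir/IR/PatternMatch.h"

    using namespace mlir;

    struct ExampleFusionPattern : public OpRewritePattern<linalg::GenericOp> {
      using OpRewritePattern<linalg::GenericOp>::OpRewritePattern;

      LogicalResult matchAndRewrite(linalg::GenericOp op,
                                    PatternRewriter &rewriter) const override {
        // Illustrative precondition.
        if (op->getNumResults() != 1) {
          // Old style: op.emitRemark("expected a single result");
          //   emits a remark on every run where the pattern does not apply.
          // New style: record the reason on the rewriter; it is only shown
          //   when pattern-application debugging is enabled.
          return rewriter.notifyMatchFailure(op, "expected a single result");
        }
        // ... perform the actual rewrite here ...
        return success();
      }
    };

The diff below applies the same idiom in two places of FusionOnTensors.cpp and drops the now-unneeded `expected-remark` check (and `-verify-diagnostics`) from the test.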
18 changes: 12 additions & 6 deletions mlir/lib/Dialect/Linalg/Transforms/FusionOnTensors.cpp
@@ -880,8 +880,10 @@ struct FoldProducerReshapeOpByLinearization

     // Further check that the resulting index maps can be fused and
     // inverted. Without this the resultant op is not legal.
-    if (!inversePermutation(concatAffineMaps(fusedIndexMaps)))
-      return op.emitRemark("fused op loop bound computation failed");
+    if (!inversePermutation(concatAffineMaps(fusedIndexMaps))) {
+      return rewriter.notifyMatchFailure(
+          op, "fused op loop bound computation failed");
+    }

     rewriter.startRootUpdate(op);
     op->setOperands(fusedOperands);
@@ -973,15 +975,19 @@ struct FoldConsumerReshapeOpByLinearization
         linearizeCollapsedDims(invMap, reshapeOp.getSrcType().getShape(),
                                reshapeOp.getReassociationMaps());
     for (AffineExpr expr : modifiedMap.getResults()) {
-      if (!expr.isPureAffine())
-        return producer.emitRemark("fused op indexing map is not affine");
+      if (!expr.isPureAffine()) {
+        return rewriter.notifyMatchFailure(
+            producer, "fused op indexing map is not affine");
+      }
     }
     fusedIndexMaps.back() = modifiedMap;

     // Further check that the resulting index maps can be fused and
     // inverted. Without this the resultant op is not legal.
-    if (!inversePermutation(concatAffineMaps(fusedIndexMaps)))
-      return reshapeOp.emitRemark("fused op loop bound computation failed");
+    if (!inversePermutation(concatAffineMaps(fusedIndexMaps))) {
+      return rewriter.notifyMatchFailure(
+          producer, "fused op loop bound computation failed");
+    }

     Location loc = producer.getLoc();
     Value output = rewriter.create<TensorReshapeOp>(
10 changes: 8 additions & 2 deletions mlir/test/Dialect/Linalg/reshape_linearization_fusion.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -split-input-file -linalg-fold-reshape-ops-by-linearization -verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt -split-input-file -linalg-fold-reshape-ops-by-linearization %s | FileCheck %s

 #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
 func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xf32>,
@@ -257,7 +257,6 @@ func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
                                            %arg1 : tensor<?x?x?x5xf32>) ->
                                            tensor<?x?xf32>
 {
-  // expected-remark @+1 {{fused op indexing map is not affine}}
   %0 = linalg.generic {
     indexing_maps = [#map0, #map0, #map0],
     iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
@@ -272,3 +271,10 @@ func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
       tensor<?x?x?x5xf32> into tensor<?x?xf32>
   return %1 : tensor<?x?xf32>
 }
+// CHECK-LABEL: func @generic_op_reshape_consumer_nofusion
+// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<?x?x?x5xf32>
+// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<?x?x?x5xf32>
+// CHECK: %[[NOFUSE:.+]] = linalg.generic
+// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]]
+// CHECK: %[[RESULT:.+]] = linalg.tensor_reshape %[[NOFUSE]]
+// CHECK: return %[[RESULT]]
