[Onnx] Add Onnx->Torch lowering for Onnx.Shrink Op (llvm#3385)
Signed-Off By: Vivek Khandelwal <[email protected]>
vivekkhandelwal1 authored Jun 7, 2024
1 parent 1c2778d commit 1a9c0a3
Showing 2 changed files with 108 additions and 0 deletions.
68 changes: 68 additions & 0 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainQtoZ.cpp
@@ -3050,4 +3050,72 @@ void mlir::torch::onnx_c::populateDefaultDomainQtoZ(
binder.op, resultType, permutedInput, reshapeSizesList);
return success();
});
patterns.onOp(
"Shrink", 9, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
Location loc = binder.getLoc();
Torch::ValueTensorType resultType;
Value input;
float bias, lambd;
if (binder.tensorOperand(input) ||
binder.f32FloatAttr(bias, "bias", 0.0) ||
binder.f32FloatAttr(lambd, "lambd", 0.5) ||
binder.tensorResultType(resultType)) {
return failure();
}

Torch::ValueTensorType inputType =
cast<Torch::ValueTensorType>(input.getType());
if (!isa<mlir::FloatType>(inputType.getDtype()))
return rewriter.notifyMatchFailure(
binder.op, "unimplemented: non-floating point dtype");

          // The formula of this operator is:
          //   If x < -lambd, y = x + bias;
          //   If x > lambd,  y = x - bias;
          //   Otherwise,     y = 0.
          // The implementation is based on the following algorithm:
          //   Shrink <bias,lambd>(input) => (output)
          //   {
          //     Lambd = Constant <value_float: float = @lambd> ()
          //     LambdCast = CastLike (Lambd, input)
          //     Bias = Constant <value_float: float = @bias> ()
          //     BiasCast = CastLike (Bias, input)
          //     Zero = Constant <value: tensor = float {0}> ()
          //     ZeroCast = CastLike (Zero, input)
          //     NegLambda = Neg (LambdCast)
          //     InputLessThanNegLambda = Less (input, NegLambda)
          //     InputAddBias = Add (input, BiasCast)
          //     InputSubBias = Sub (input, BiasCast)
          //     LambdaLessThanInput = Less (LambdCast, input)
          //     InputSubBiasOrZero = Where (LambdaLessThanInput, InputSubBias, ZeroCast)
          //     output = Where (InputLessThanNegLambda, InputAddBias, InputSubBiasOrZero)
          //   }
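          // As a concrete illustration (values chosen for this comment, not
          // taken from the spec): with lambd = 1.5 and bias = 1.5,
          //   x = -2.0 : x < -1.5, so y = -2.0 + 1.5 = -0.5
          //   x =  0.0 : -1.5 <= x <= 1.5, so y = 0.0
          //   x =  2.0 : x > 1.5, so y = 2.0 - 1.5 = 0.5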
Value constLambd = rewriter.create<Torch::ConstantFloatOp>(
loc, rewriter.getFloatAttr(rewriter.getF64Type(), lambd));
Value constBias = rewriter.create<Torch::ConstantFloatOp>(
loc, rewriter.getFloatAttr(rewriter.getF64Type(), bias));
Value constZero = rewriter.create<Torch::ConstantFloatOp>(
loc, rewriter.getFloatAttr(rewriter.getF64Type(), 0.0));
Value constOne = rewriter.create<Torch::ConstantFloatOp>(
loc, rewriter.getFloatAttr(rewriter.getF64Type(), 1.0));
Value constNegLambd = rewriter.create<Torch::ConstantFloatOp>(
loc, rewriter.getFloatAttr(rewriter.getF64Type(), -lambd));

Value inputLTNegLambd = rewriter.create<Torch::AtenLtScalarOp>(
loc, inputType, input, constNegLambd);
Value inputPlusBias = rewriter.create<Torch::AtenAddScalarOp>(
loc, inputType, input, constBias, /*alpha=*/constOne);
Value inputSubBias = rewriter.create<Torch::AtenSubScalarOp>(
loc, inputType, input, constBias, /*alpha=*/constOne);
Value inputGTLambd = rewriter.create<Torch::AtenGtScalarOp>(
loc, inputType, input, constLambd);

Value inputSubBiasOrZero =
rewriter.create<Torch::AtenWhereScalarOtherOp>(
loc, resultType, inputGTLambd, inputSubBias, constZero);
rewriter.replaceOpWithNewOp<Torch::AtenWhereSelfOp>(
binder.op, resultType, inputLTNegLambd, inputPlusBias,
inputSubBiasOrZero);

return success();
});
}
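As a side note for readers new to this operator: the element-wise behaviour that the pattern above decomposes into torch ops can be sketched as a plain scalar function. The snippet below is an illustrative sketch only, not code from this patch; the function name and the small driver are invented for this note, and the default arguments are chosen to mirror the binder defaults above (lambd = 0.5, bias = 0.0).

#include <initializer_list>
#include <iostream>

// Scalar sketch of the ONNX Shrink semantics (illustration only, not part of the patch).
float shrink(float x, float lambd = 0.5f, float bias = 0.0f) {
  if (x < -lambd)
    return x + bias; // below the negative threshold: shift up by bias
  if (x > lambd)
    return x - bias; // above the positive threshold: shift down by bias
  return 0.0f;       // inside [-lambd, lambd]: shrink to zero
}

int main() {
  // Same attribute values as the @Shrink test below: lambd = bias = 1.5.
  for (float x : {-2.0f, -1.0f, 0.0f, 1.0f, 2.0f})
    std::cout << x << " -> " << shrink(x, /*lambd=*/1.5f, /*bias=*/1.5f) << "\n";
  return 0;
}

With lambd = bias = 1.5 this prints -0.5, 0, 0, 0, 0.5, matching the per-branch arithmetic spelled out in the comment inside the pattern; the FileCheck tests below verify the emitted IR structure rather than these numeric values.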
40 changes: 40 additions & 0 deletions test/Conversion/TorchOnnxToTorch/simple_ops_q_to_z.mlir
@@ -2299,3 +2299,43 @@ func.func @test_spacetodepth_dynamic_dims(%arg0: !torch.vtensor<[?,?,?,?],f32>)
%0 = torch.operator "onnx.SpaceToDepth"(%arg0) {torch.onnx.blocksize = 2 : si64} : (!torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?,?],f32>
return %0 : !torch.vtensor<[?,?,?,?],f32>
}

// -----

// CHECK-LABEL: func.func @Shrink
func.func @Shrink(%arg0: !torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
// CHECK: %float1.500000e00 = torch.constant.float 1.500000e+00
// CHECK: %float1.500000e00_0 = torch.constant.float 1.500000e+00
// CHECK: %float0.000000e00 = torch.constant.float 0.000000e+00
// CHECK: %float1.000000e00 = torch.constant.float 1.000000e+00
// CHECK: %float-1.500000e00 = torch.constant.float -1.500000e+00
// CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %1 = torch.aten.add.Scalar %arg0, %float1.500000e00_0, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %2 = torch.aten.sub.Scalar %arg0, %float1.500000e00_0, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
// CHECK: return %5 : !torch.vtensor<[5],f32>
%0 = torch.operator "onnx.Shrink"(%arg0) {torch.onnx.bias = 1.500000e+00 : f32, torch.onnx.lambd = 1.500000e+00 : f32} : (!torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32>
return %0 : !torch.vtensor<[5],f32>
}

// -----

// CHECK-LABEL: func.func @test_shrink_hard
func.func @test_shrink_hard(%arg0: !torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32> attributes {torch.onnx_meta.ir_version = 4 : si64, torch.onnx_meta.opset_version = 9 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
// CHECK: %float1.500000e00 = torch.constant.float 1.500000e+00
// CHECK: %float0.000000e00 = torch.constant.float 0.000000e+00
// CHECK: %float0.000000e00_0 = torch.constant.float 0.000000e+00
// CHECK: %float1.000000e00 = torch.constant.float 1.000000e+00
// CHECK: %float-1.500000e00 = torch.constant.float -1.500000e+00
// CHECK: %0 = torch.aten.lt.Scalar %arg0, %float-1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %1 = torch.aten.add.Scalar %arg0, %float0.000000e00, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %2 = torch.aten.sub.Scalar %arg0, %float0.000000e00, %float1.000000e00 : !torch.vtensor<[5],f32>, !torch.float, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %3 = torch.aten.gt.Scalar %arg0, %float1.500000e00 : !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %4 = torch.aten.where.ScalarOther %3, %2, %float0.000000e00_0 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.float -> !torch.vtensor<[5],f32>
// CHECK: %5 = torch.aten.where.self %0, %1, %4 : !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32>, !torch.vtensor<[5],f32> -> !torch.vtensor<[5],f32>
// CHECK: return %5 : !torch.vtensor<[5],f32>
%0 = torch.operator "onnx.Shrink"(%arg0) {torch.onnx.lambd = 1.500000e+00 : f32} : (!torch.vtensor<[5],f32>) -> !torch.vtensor<[5],f32>
return %0 : !torch.vtensor<[5],f32>
}
