From fc350f06f6ba5291041809d2a903b47da4ecf23d Mon Sep 17 00:00:00 2001 From: Yingwei Zheng Date: Tue, 5 Mar 2024 21:44:16 +0800 Subject: [PATCH] [InstCombine] Simplify with.overflow intrinsics with assumption information --- .../InstCombine/InstCombineCalls.cpp | 27 +++++++++++++++++++ llvm/test/Transforms/InstCombine/overflow.ll | 16 +++-------- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 50c0f9a913f32a..f76d094c6dd869 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -818,6 +818,33 @@ InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) { if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(), WO->getRHS(), *WO, OperationResult, OverflowResult)) return createOverflowTuple(WO, OperationResult, OverflowResult); + + // See whether we can optimize the overflow check with assumption information. 
+ for (User *U : WO->users()) { + if (!match(U, m_ExtractValue<1>(m_Value()))) + continue; + + for (auto &AssumeVH : AC.assumptionsFor(U)) { + if (!AssumeVH) + continue; + CallInst *I = cast<CallInst>(AssumeVH); + if (!match(I->getArgOperand(0), m_Not(m_Specific(U)))) + continue; + if (!isValidAssumeForContext(I, II, &DT)) + continue; + Value *Result = + Builder.CreateBinOp(WO->getBinaryOp(), WO->getLHS(), WO->getRHS()); + if (auto *Inst = dyn_cast<Instruction>(Result)) { + if (WO->isSigned()) + Inst->setHasNoSignedWrap(); + else + Inst->setHasNoUnsignedWrap(); + } + return createOverflowTuple(WO, Result, + ConstantInt::getFalse(U->getType())); + } + } + return nullptr; } diff --git a/llvm/test/Transforms/InstCombine/overflow.ll b/llvm/test/Transforms/InstCombine/overflow.ll index cd4289f84e963e..22e1631f78ee91 100644 --- a/llvm/test/Transforms/InstCombine/overflow.ll +++ b/llvm/test/Transforms/InstCombine/overflow.ll @@ -173,12 +173,8 @@ if.end: define i32 @uadd_no_overflow(i32 %a, i32 %b) { ; CHECK-LABEL: @uadd_no_overflow( -; CHECK-NEXT: [[VAL:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]]) -; CHECK-NEXT: [[OV:%.*]] = extractvalue { i32, i1 } [[VAL]], 1 -; CHECK-NEXT: [[NOWRAP:%.*]] = xor i1 [[OV]], true -; CHECK-NEXT: tail call void @llvm.assume(i1 [[NOWRAP]]) -; CHECK-NEXT: [[RES:%.*]] = extractvalue { i32, i1 } [[VAL]], 0 -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: [[TMP1:%.*]] = add nuw i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: ret i32 [[TMP1]] ; %val = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) %ov = extractvalue { i32, i1 } %val, 1 @@ -190,12 +186,8 @@ define i32 @uadd_no_overflow(i32 %a, i32 %b) { define i32 @smul_no_overflow(i32 %a, i32 %b) { ; CHECK-LABEL: @smul_no_overflow( -; CHECK-NEXT: [[VAL:%.*]] = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]]) -; CHECK-NEXT: [[OV:%.*]] = extractvalue { i32, i1 } [[VAL]], 1 -; CHECK-NEXT: [[NOWRAP:%.*]] = xor i1 [[OV]], true -; CHECK-NEXT: 
tail call void @llvm.assume(i1 [[NOWRAP]]) -; CHECK-NEXT: [[RES:%.*]] = extractvalue { i32, i1 } [[VAL]], 0 -; CHECK-NEXT: ret i32 [[RES]] +; CHECK-NEXT: [[TMP1:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: ret i32 [[TMP1]] ; %val = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %a, i32 %b) %ov = extractvalue { i32, i1 } %val, 1