diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e2b81ba864c3c..2ec7eb55535fd 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3226,12 +3226,13 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     // TODO: apply range metadata for range check patterns?
   }
 
-  // Separate storage assumptions apply to the underlying allocations, not any
-  // particular pointer within them. When evaluating the hints for AA purposes
-  // we getUnderlyingObject them; by precomputing the answers here we can
-  // avoid having to do so repeatedly there.
   for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
     OperandBundleUse OBU = II->getOperandBundleAt(Idx);
+
+    // Separate storage assumptions apply to the underlying allocations, not
+    // any particular pointer within them. When evaluating the hints for AA
+    // purposes we getUnderlyingObject them; by precomputing the answers here
+    // we can avoid having to do so repeatedly there.
     if (OBU.getTagName() == "separate_storage") {
       assert(OBU.Inputs.size() == 2);
       auto MaybeSimplifyHint = [&](const Use &U) {
@@ -3245,6 +3246,30 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       MaybeSimplifyHint(OBU.Inputs[0]);
       MaybeSimplifyHint(OBU.Inputs[1]);
     }
+
+    // Try to fold alignment assumption into a load's !align metadata, if the
+    // assumption is valid in the load's context and remove redundant ones.
+    if (OBU.getTagName() == "align" && OBU.Inputs.size() == 2) {
+      RetainedKnowledge RK = getKnowledgeFromBundle(
+          *cast<AssumeInst>(II), II->bundle_op_info_begin()[Idx]);
+      if (!RK || RK.AttrKind != Attribute::Alignment ||
+          !isPowerOf2_64(RK.ArgValue))
+        continue;
+
+      // Don't try to remove align assumptions for pointers derived from
+      // arguments. We might lose information if the function gets inlined and
+      // the align argument attribute disappears.
+      Value *UO = getUnderlyingObject(RK.WasOn);
+      if (!UO || isa<Argument>(UO))
+        continue;
+
+      KnownBits Known = computeKnownBits(RK.WasOn, 0, nullptr);
+      unsigned TZ = std::min(Known.countMinTrailingZeros(), 63u);
+      if ((1ULL << TZ) < RK.ArgValue)
+        continue;
+      auto *New = CallBase::removeOperandBundle(II, OBU.getTagID());
+      return New;
+    }
   }
 
   // Convert nonnull assume like:
diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll
index f0e0257433086..5fa8c78acd727 100644
--- a/llvm/test/Transforms/InstCombine/assume-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-align.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals smart
-; RUN: opt -S -passes=instcombine,simplifycfg < %s 2>&1 | FileCheck %s
+; RUN: opt -S -passes='instcombine,simplifycfg' < %s 2>&1 | FileCheck %s
 
 declare void @llvm.assume(i1 noundef)
 
@@ -135,6 +135,17 @@ define ptr @fold_assume_align_pow2_of_loaded_pointer_into_align_metadata(ptr %p)
   ret ptr %p2
 }
 
+define ptr @fold_assume_align_i32_pow2_of_loaded_pointer_into_align_metadata(ptr %p) {
+; CHECK-LABEL: @fold_assume_align_i32_pow2_of_loaded_pointer_into_align_metadata(
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i32 8) ]
+; CHECK-NEXT:    ret ptr [[P2]]
+;
+  %p2 = load ptr, ptr %p
+  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i32 8) ]
+  ret ptr %p2
+}
+
 define ptr @dont_fold_assume_align_pow2_of_loaded_pointer_into_align_metadata_due_to_call(ptr %p) {
 ; CHECK-LABEL: @dont_fold_assume_align_pow2_of_loaded_pointer_into_align_metadata_due_to_call(
 ; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
@@ -175,7 +186,6 @@ define ptr @dont_fold_assume_align_zero_of_loaded_pointer_into_align_metadata(pt
 define ptr @redundant_assume_align_1(ptr %p) {
 ; CHECK-LABEL: @redundant_assume_align_1(
 ; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i32 1) ]
 ; CHECK-NEXT:    call void @foo(ptr [[P2]])
 ; CHECK-NEXT:    ret ptr [[P2]]
 ;
@@ -189,7 +199,6 @@ define ptr @redundant_assume_align_8_via_align_metadata(ptr %p) {
 ; CHECK-LABEL: @redundant_assume_align_8_via_align_metadata(
 ; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8, !align [[META0:![0-9]+]]
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P2]], i32 8) ]
 ; CHECK-NEXT:    call void @foo(ptr [[P2]])
 ; CHECK-NEXT:    ret ptr [[P2]]
 ;
@@ -249,7 +258,48 @@ define ptr @redundant_assume_align_8_via_asume(ptr %p) {
   ret ptr %p
 }
 
+define void @redundant_arg_passed_to_intrinsic(ptr %dst, ptr %src) {
+; CHECK-LABEL: @redundant_arg_passed_to_intrinsic(
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[DST:%.*]], i32 8) ]
+; CHECK-NEXT:    call void @bar()
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[SRC:%.*]], i32 8) ]
+; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i64(ptr noundef nonnull align 8 dereferenceable(16) [[DST]], ptr noundef nonnull align 8 dereferenceable(16) [[SRC]], i64 16, i1 false)
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.assume(i1 true) [ "align"(ptr %dst, i32 8) ]
+  call void @bar()
+  call void @llvm.assume(i1 true) [ "align"(ptr %src, i32 8) ]
+  call void @llvm.memmove.p0.p0.i64(ptr align 8 %dst, ptr %src, i64 16, i1 false)
+  ret void
+}
+
+define void @test_store(ptr %ptr) {
+; CHECK-LABEL: @test_store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[PTR:%.*]], i64 2) ]
+; CHECK-NEXT:    store i16 0, ptr [[PTR]], align 1
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.assume(i1 true) [ "align"(ptr %ptr, i64 2) ]
+  store i16 0, ptr %ptr, align 1
+  ret void
+}
+
 declare void @foo(ptr)
+declare void @bar()
+
+; !align must have a constant integer alignment.
+define ptr @dont_fold_assume_align_not_constant_of_loaded_pointer_into_align_metadata(ptr %p, i64 %align) {
+; CHECK-LABEL: @dont_fold_assume_align_not_constant_of_loaded_pointer_into_align_metadata(
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr [[P:%.*]], align 8
+; CHECK-NEXT:    ret ptr [[P2]]
+;
+  %p2 = load ptr, ptr %p
+  call void @llvm.assume(i1 true) [ "align"(ptr %p2, i64 %align) ]
+  ret ptr %p2
+}
+
 ;.
 ; CHECK: [[META0]] = !{i64 8}
 ;.
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index c21f8457e82d1..073fc75fb8ff4 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=instcombine -S | FileCheck --check-prefixes=CHECK,DEFAULT %s
-; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S | FileCheck --check-prefixes=CHECK,BUNDLES %s
+; RUN: opt < %s -passes='instcombine' -S | FileCheck --check-prefixes=CHECK,DEFAULT %s
+; RUN: opt < %s -passes='instcombine' --enable-knowledge-retention -S | FileCheck --check-prefixes=CHECK,BUNDLES %s
 
-; RUN: opt < %s -passes=instcombine -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,DEFAULT %s
-; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,BUNDLES %s
+; RUN: opt < %s -passes='instcombine' -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,DEFAULT %s
+; RUN: opt < %s -passes='instcombine' --enable-knowledge-retention -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,BUNDLES %s
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"