63 changes: 63 additions & 0 deletions llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
@@ -31,6 +32,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
@@ -1600,6 +1602,67 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (InVal.IsLoad)
if (auto *I = dyn_cast<Instruction>(Op))
combineMetadataForCSE(I, &Inst, false);

// If the load has align and noundef metadata, preserve it via an
// alignment assumption. Note that this doesn't use salvageKnowledge,
// as we need to create the assumption for the value we replaced the
// load with.
if (auto *AlignMD = Inst.getMetadata(LLVMContext::MD_align)) {
Contributor:
You also need !noundef for this transform. !align by itself only makes a misaligned result poison, while the assumption turns a violation into immediate UB (see the IR sketch after this thread).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should be updated, thanks!
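
A minimal IR sketch of the distinction discussed above, assuming a required alignment of 4 (illustrative only, not taken from the patch or its tests):

declare void @llvm.assume(i1 noundef)

; With !align alone, a misaligned result only makes %v poison.
define ptr @align_only(ptr %p) {
  %v = load ptr, ptr %p, align 8, !align !0
  ret ptr %v
}

; With !noundef as well, a misaligned result is immediate UB, which is the
; same guarantee an "align" assume bundle gives, so the fact can be
; restated as an assumption on %v.
define ptr @align_and_noundef(ptr %p) {
  %v = load ptr, ptr %p, align 8, !align !0, !noundef !1
  call void @llvm.assume(i1 true) [ "align"(ptr %v, i64 4) ]
  ret ptr %v
}

!0 = !{i64 4}
!1 = !{}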

if (Inst.hasMetadata(LLVMContext::MD_noundef) ||
programUndefinedIfPoison(&Inst)) {
Inst.setMetadata(LLVMContext::MD_align, nullptr);
auto *B = mdconst::extract<ConstantInt>(AlignMD->getOperand(0));
auto KB = computeKnownBits(Op, SQ.DL);
unsigned AlignFromKB = 1 << KB.countMinTrailingZeros();
if (AlignFromKB < B->getZExtValue()) {
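// Scan the (transitive) users of the load to see whether any of them could
// actually benefit from the stronger alignment: a load or store with a
// smaller alignment, a return, or a compare of two non-constant values.
// Conservatively treat the alignment as needed once more than 16 users
// have been visited.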
SetVector<const Value *> WorkList;
bool AlignNeeded = false;
for (const User *U : Inst.users())
if (auto *I = dyn_cast<Instruction>(U))
WorkList.insert(I);

for (unsigned I = 0; I != WorkList.size(); ++I) {
auto *Curr = WorkList[I];
if (auto *LI = dyn_cast<LoadInst>(Curr)) {
if (LI->getAlign().value() < B->getZExtValue()) {
AlignNeeded = true;
break;
}
continue;
}
if (auto *SI = dyn_cast<StoreInst>(Curr)) {
if (SI->getAlign().value() < B->getZExtValue()) {
AlignNeeded = true;
break;
}
continue;
}
if (isa<ReturnInst>(Curr)) {
AlignNeeded = true;
break;
}
if (isa<ICmpInst>(Curr) &&
!isa<Constant>(cast<Instruction>(Curr)->getOperand(0)) &&
!isa<Constant>(cast<Instruction>(Curr)->getOperand(1))) {
AlignNeeded = true;
break;
}
if (WorkList.size() > 16) {
AlignNeeded = true;
break;
}

for (const User *U : Curr->users())
WorkList.insert(cast<Instruction>(U));
}
if (AlignNeeded) {
IRBuilder Builder(&Inst);
Builder.CreateAlignmentAssumption(SQ.DL, Op, B);
}
}
}
}

if (!Inst.use_empty())
Inst.replaceAllUsesWith(Op);
salvageKnowledge(&Inst, &AC);
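
A condensed before/after sketch of the new code path, mirroring the first test in materialize-align-assumptions.ll below (the @before/@after names are only for illustration): the redundant load that carries !align and !noundef is CSE'd away, and its alignment fact survives as an assume bundle on the replacement value.

declare void @foo(ptr)
declare void @llvm.assume(i1 noundef)

; Before EarlyCSE: %l.3 reloads the pointer just stored as %gep.
define ptr @before(ptr noalias %p) {
  %l.1 = load ptr, ptr %p, align 8
  call void @foo(ptr %l.1)
  %l.2 = load ptr, ptr %p, align 8
  %gep = getelementptr i8, ptr %l.2, i64 4
  store ptr %gep, ptr %p, align 8
  %l.3 = load ptr, ptr %p, align 8, !align !0, !noundef !{}
  ret ptr %l.3
}

; After EarlyCSE: %l.3 is replaced by %gep and its !align !0 (i64 4) is
; preserved as an alignment assumption on %gep.
define ptr @after(ptr noalias %p) {
  %l.1 = load ptr, ptr %p, align 8
  call void @foo(ptr %l.1)
  %gep = getelementptr i8, ptr %l.1, i64 4
  store ptr %gep, ptr %p, align 8
  call void @llvm.assume(i1 true) [ "align"(ptr %gep, i64 4) ]
  ret ptr %gep
}

!0 = !{i64 4}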
38 changes: 30 additions & 8 deletions llvm/test/Transforms/EarlyCSE/materialize-align-assumptions.ll
@@ -3,21 +3,40 @@

declare void @foo(ptr)

define ptr @align_replacement_does_not_have_align_metadata_missing_noundef(ptr noalias %p) {
; CHECK-LABEL: define ptr @align_replacement_does_not_have_align_metadata_missing_noundef(
; CHECK-SAME: ptr noalias [[P:%.*]]) {
; CHECK-NEXT: [[L_1:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT: call void @foo(ptr [[L_1]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[L_1]], i64 4
; CHECK-NEXT: store ptr [[GEP]], ptr [[P]], align 8
; CHECK-NEXT: ret ptr [[GEP]]
;
%l.1 = load ptr, ptr %p, align 8
call void @foo(ptr %l.1)
%l.2 = load ptr, ptr %p, align 8
%gep = getelementptr i8, ptr %l.2, i64 4
store ptr %gep, ptr %p, align 8
%l.3 = load ptr, ptr %p, align 8, !align !0
ret ptr %l.3
}

define ptr @align_replacement_does_not_have_align_metadata(ptr noalias %p) {
; CHECK-LABEL: define ptr @align_replacement_does_not_have_align_metadata(
; CHECK-SAME: ptr noalias [[P:%.*]]) {
; CHECK-NEXT: [[L_1:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT: call void @foo(ptr [[L_1]])
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[L_1]], i64 4
; CHECK-NEXT: store ptr [[GEP]], ptr [[P]], align 8
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP]], i64 4) ]
; CHECK-NEXT: ret ptr [[GEP]]
;
%l.1 = load ptr, ptr %p, align 8
call void @foo(ptr %l.1)
%l.2 = load ptr, ptr %p, align 8
%gep = getelementptr i8, ptr %l.2, i64 4
store ptr %gep, ptr %p, align 8
%l.3 = load ptr, ptr %p, align 8, !align !0
%l.3 = load ptr, ptr %p, align 8, !align !0, !noundef !{}
ret ptr %l.3
}

@@ -27,12 +46,13 @@ define ptr @align_replacement_does_not_have_align_metadata2(ptr noalias %p) {
; CHECK-NEXT: [[L_1:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[L_1]], i64 4
; CHECK-NEXT: store ptr [[GEP]], ptr [[P]], align 8
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP]], i64 4) ]
; CHECK-NEXT: ret ptr [[GEP]]
;
%l.1 = load ptr, ptr %p, align 8
%gep = getelementptr i8, ptr %l.1, i64 4
store ptr %gep, ptr %p, align 8
%l.2 = load ptr, ptr %p, align 8, !align !0
%l.2 = load ptr, ptr %p, align 8, !align !0, !noundef !{}
ret ptr %l.2
}

@@ -54,11 +74,12 @@ define ptr @align_replacement_has_smaller_alignment(ptr noalias %p) {
; CHECK-SAME: ptr noalias [[P:%.*]]) {
; CHECK-NEXT: [[L_1:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0]]
; CHECK-NEXT: call void @foo(ptr [[L_1]])
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[L_1]], i64 8) ]
; CHECK-NEXT: ret ptr [[L_1]]
;
%l.1 = load ptr, ptr %p, align 8, !align !0
call void @foo(ptr %l.1)
%l.2 = load ptr, ptr %p, align 8, !align !1
%l.2 = load ptr, ptr %p, align 8, !align !1, !noundef !{}
ret ptr %l.2
}

@@ -67,12 +88,12 @@ define ptr @align_replacement_has_larger_alignment(ptr %p) {
; CHECK-SAME: ptr [[P:%.*]]) {
; CHECK-NEXT: [[L_1:%.*]] = load ptr, ptr [[P]], align 8, !align [[META1:![0-9]+]]
; CHECK-NEXT: call void @foo(ptr [[L_1]])
; CHECK-NEXT: [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0]]
; CHECK-NEXT: [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META0]], !noundef [[META2:![0-9]+]]
; CHECK-NEXT: ret ptr [[L_2]]
;
%l.1 = load ptr, ptr %p, align 8, !align !1
call void @foo(ptr %l.1)
%l.2 = load ptr, ptr %p, align 8, !align !0
%l.2 = load ptr, ptr %p, align 8, !align !0, !noundef !{}
ret ptr %l.2
}

@@ -81,12 +102,12 @@ define ptr @align_1(ptr %p) {
; CHECK-SAME: ptr [[P:%.*]]) {
; CHECK-NEXT: [[L_1:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT: call void @foo(ptr [[L_1]])
; CHECK-NEXT: [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META2:![0-9]+]]
; CHECK-NEXT: [[L_2:%.*]] = load ptr, ptr [[P]], align 8, !align [[META3:![0-9]+]], !noundef [[META2]]
; CHECK-NEXT: ret ptr [[L_2]]
;
%l.1 = load ptr, ptr %p, align 8
call void @foo(ptr %l.1)
%l.2 = load ptr, ptr %p, align 8, !align !2
%l.2 = load ptr, ptr %p, align 8, !align !2, !noundef !{}
ret ptr %l.2
}

@@ -96,5 +117,6 @@ define ptr @align_1(ptr %p) {
;.
; CHECK: [[META0]] = !{i64 4}
; CHECK: [[META1]] = !{i64 8}
; CHECK: [[META2]] = !{i64 1}
; CHECK: [[META2]] = !{}
; CHECK: [[META3]] = !{i64 1}
;.
82 changes: 82 additions & 0 deletions llvm/test/Transforms/PhaseOrdering/infer-align-from-assumption.ll
@@ -0,0 +1,82 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes='default<O3>' -S %s | FileCheck %s

target triple = "arm64-apple-macosx"

declare void @llvm.assume(i1 noundef)

define i32 @entry(ptr %0) {
; CHECK-LABEL: define i32 @entry(
; CHECK-SAME: ptr captures(none) [[TMP0:%.*]]) local_unnamed_addr {
; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
; CHECK-NEXT: [[DOT0_COPYLOAD_I_I_I:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I_I]])
; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 4
; CHECK-NEXT: store ptr [[TMP5]], ptr [[TMP0]], align 8
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP5]], i64 4) ]
; CHECK-NEXT: [[DOT0_COPYLOAD_I_I_I1:%.*]] = load i32, ptr [[TMP5]], align 4
; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I_I1]])
; CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP0]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i64 4
; CHECK-NEXT: store ptr [[TMP8]], ptr [[TMP0]], align 8
; CHECK-NEXT: ret i32 [[TMP6]]
;
%2 = call i32 @fn1(ptr %0)
%3 = call i32 @fn1(ptr %0)
ret i32 %3
}


define i32 @fn1(ptr %0) {
; CHECK-LABEL: define i32 @fn1(
; CHECK-SAME: ptr captures(none) [[TMP0:%.*]]) local_unnamed_addr {
; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
; CHECK-NEXT: [[DOT0_COPYLOAD_I_I:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I_I]])
; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 4
; CHECK-NEXT: store ptr [[TMP5]], ptr [[TMP0]], align 8
; CHECK-NEXT: ret i32 [[TMP3]]
;
%2 = call i32 @fn2(ptr %0)
ret i32 %2
}

define i32 @fn2(ptr %0) {
; CHECK-LABEL: define i32 @fn2(
; CHECK-SAME: ptr captures(none) [[TMP0:%.*]]) local_unnamed_addr {
; CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4) ]
; CHECK-NEXT: [[DOT0_COPYLOAD_I:%.*]] = load i32, ptr [[TMP2]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD_I]])
; CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i64 4
; CHECK-NEXT: store ptr [[TMP5]], ptr [[TMP0]], align 8
; CHECK-NEXT: ret i32 [[TMP3]]
;
%2 = load ptr, ptr %0, align 8
%3 = call i32 @load_assume_aligned(ptr %2)
%4 = load ptr, ptr %0, align 8
%5 = getelementptr i8, ptr %4, i64 4
store ptr %5, ptr %0, align 8
ret i32 %3
}

define i32 @load_assume_aligned(ptr %0) {
; CHECK-LABEL: define i32 @load_assume_aligned(
; CHECK-SAME: ptr readonly captures(none) [[TMP0:%.*]]) local_unnamed_addr {
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0]], i64 4) ]
; CHECK-NEXT: [[DOT0_COPYLOAD:%.*]] = load i32, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD]])
; CHECK-NEXT: ret i32 [[TMP2]]
;
call void @llvm.assume(i1 true) [ "align"(ptr %0, i64 4) ]
%.0.copyload = load i32, ptr %0, align 1
%2 = call i32 @swap(i32 %.0.copyload)
ret i32 %2
}

declare i32 @swap(i32)