From e687f1f0d2085a093569d2b2eaa7cf24c03a8924 Mon Sep 17 00:00:00 2001
From: Ellis Hoag
Date: Mon, 25 Aug 2025 20:17:28 -0700
Subject: [PATCH 1/4] Check identical alignment for atomic instrs

---
 llvm/lib/IR/Instruction.cpp                   |   6 +-
 .../Analysis/IRSimilarityIdentifierTest.cpp   | 159 ++++++++++++++++++
 2 files changed, 164 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 5e87b5ff941ad..6010e2a99cf01 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -864,7 +864,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
 bool Instruction::hasSameSpecialState(const Instruction *I2,
                                       bool IgnoreAlignment,
                                       bool IntersectAttrs) const {
-  auto I1 = this;
+  const auto *I1 = this;
   assert(I1->getOpcode() == I2->getOpcode() &&
          "Can not compare special state of different instructions");
 
@@ -917,6 +917,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
            FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
     return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+           (CXI->getAlign() == cast<AtomicCmpXchgInst>(I2)->getAlign() ||
+            IgnoreAlignment) &&
            CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
            CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
@@ -927,6 +929,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
     return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
            RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+           (RMWI->getAlign() == cast<AtomicRMWInst>(I2)->getAlign() ||
+            IgnoreAlignment) &&
            RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
            RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
   if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 03009d53d63f4..067fec85e182b 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -19,11 +19,14 @@
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/SourceMgr.h"
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
 using namespace IRSimilarity;
 
+using testing::SizeIs;
+
 static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
                                               StringRef ModuleStr) {
   SMDiagnostic Err;
@@ -730,6 +733,162 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
   ASSERT_TRUE(UnsignedVec[0] != UnsignedVec[1]);
 }
 
+// Checks that atomicrmw that have the different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentType) {
+  StringRef ModuleString = R"(
+                          define i32 @f(i32* %a, i64* %b) {
+                          bb0:
+                             %1 = atomicrmw add i32* %a, i32 1 acquire
+                             %2 = atomicrmw add i64* %b, i64 1 acquire
+                             ret i32 0
+                          })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw that have the different aligns are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
+  StringRef ModuleString = R"(
+                          define i32 @f(i32* %a, i32* %b) {
+                          bb0:
+                             %1 = atomicrmw add i32* %a, i32 1 acquire, align 4
+                             %2 = atomicrmw add i32* %b, i32 1 acquire, align 8
+                             ret i32 0
+                          })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw that have the different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
+  StringRef ModuleString = R"(
+                          define i32 @f(i32* %a, i32* %b) {
+                          bb0:
+                             %1 = atomicrmw volatile add i32* %a, i32 1 acquire
+                             %2 = atomicrmw add i32* %b, i32 1 acquire
+                             ret i32 0
+                          })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
+  StringRef ModuleString = R"(
+                          define i32 @f(i32* %a, i64* %b) {
+                          bb0:
+                             %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
+                             %2 = cmpxchg i64* %b, i64 0, i64 1 monotonic monotonic
+                             ret i32 0
+                          })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different aligns are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
+  StringRef ModuleString = R"(
+                          define i32 @f(i32* %a, i32* %b) {
+                          bb0:
+                             %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, align 4
+                             %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic, align 8
+                             ret i32 0
+                          })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
+  StringRef ModuleString = R"(
+                          define i32 @f(i32* %a, i32* %b) {
+                          bb0:
+                             %1 = cmpxchg volatile i32* %a, i32 0, i32 1 monotonic monotonic
+                             %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic
+                             ret i32 0
+                          })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
 // Checks that the branch is mapped to legal when the option is set.
 TEST(IRInstructionMapper, BranchLegal) {
   StringRef ModuleString = R"(

From 77440a90f339ab2b0563198dcfc57c3712d39fa7 Mon Sep 17 00:00:00 2001
From: Ellis Hoag
Date: Tue, 26 Aug 2025 06:52:42 -0700
Subject: [PATCH 2/4] use ptr

---
 .../Analysis/IRSimilarityIdentifierTest.cpp   | 36 +++++++++----------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 067fec85e182b..aa26a8e3469b4 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -737,10 +737,10 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
 // different unsigned integers.
 TEST(IRInstructionMapper, AtomicRMWDifferentType) {
   StringRef ModuleString = R"(
-                          define i32 @f(i32* %a, i64* %b) {
+                          define i32 @f(ptr %a, ptr %b) {
                           bb0:
-                             %1 = atomicrmw add i32* %a, i32 1 acquire
-                             %2 = atomicrmw add i64* %b, i64 1 acquire
+                             %1 = atomicrmw add ptr %a, i32 1 acquire
+                             %2 = atomicrmw add ptr %b, i64 1 acquire
                              ret i32 0
                           })";
   LLVMContext Context;
@@ -763,10 +763,10 @@ TEST(IRInstructionMapper, AtomicRMWDifferentType) {
 // unsigned integers.
 TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
   StringRef ModuleString = R"(
-                          define i32 @f(i32* %a, i32* %b) {
+                          define i32 @f(ptr %a, ptr %b) {
                           bb0:
-                             %1 = atomicrmw add i32* %a, i32 1 acquire, align 4
-                             %2 = atomicrmw add i32* %b, i32 1 acquire, align 8
+                             %1 = atomicrmw add ptr %a, i32 1 acquire, align 4
+                             %2 = atomicrmw add ptr %b, i32 1 acquire, align 8
                              ret i32 0
                           })";
   LLVMContext Context;
@@ -789,10 +789,10 @@ TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
 // different unsigned integers.
 TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
   StringRef ModuleString = R"(
-                          define i32 @f(i32* %a, i32* %b) {
+                          define i32 @f(ptr %a, ptr %b) {
                           bb0:
-                             %1 = atomicrmw volatile add i32* %a, i32 1 acquire
-                             %2 = atomicrmw add i32* %b, i32 1 acquire
+                             %1 = atomicrmw volatile add ptr %a, i32 1 acquire
+                             %2 = atomicrmw add ptr %b, i32 1 acquire
                              ret i32 0
                           })";
   LLVMContext Context;
@@ -815,10 +815,10 @@ TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
 // different unsigned integers.
 TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
   StringRef ModuleString = R"(
-                          define i32 @f(i32* %a, i64* %b) {
+                          define i32 @f(ptr %a, ptr %b) {
                           bb0:
-                             %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
-                             %2 = cmpxchg i64* %b, i64 0, i64 1 monotonic monotonic
+                             %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic
+                             %2 = cmpxchg ptr %b, i64 0, i64 1 monotonic monotonic
                              ret i32 0
                           })";
   LLVMContext Context;
@@ -841,10 +841,10 @@ TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
 // unsigned integers.
 TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
   StringRef ModuleString = R"(
-                          define i32 @f(i32* %a, i32* %b) {
+                          define i32 @f(ptr %a, ptr %b) {
                           bb0:
-                             %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, align 4
-                             %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic, align 8
+                             %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic, align 4
+                             %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic, align 8
                              ret i32 0
                           })";
   LLVMContext Context;
@@ -867,10 +867,10 @@ TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
 // different unsigned integers.
 TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
   StringRef ModuleString = R"(
-                          define i32 @f(i32* %a, i32* %b) {
+                          define i32 @f(ptr %a, ptr %b) {
                           bb0:
-                             %1 = cmpxchg volatile i32* %a, i32 0, i32 1 monotonic monotonic
-                             %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic
+                             %1 = cmpxchg volatile ptr %a, i32 0, i32 1 monotonic monotonic
+                             %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic
                              ret i32 0
                           })";
   LLVMContext Context;

From e44fabffdef3f0e3859c8ea1b51038a19d3b2da4 Mon Sep 17 00:00:00 2001
From: Ellis Hoag
Date: Thu, 18 Sep 2025 13:27:04 -0700
Subject: [PATCH 3/4] add iroutliner test

---
 .../IROutliner/outlining-special-state.ll     | 163 ++++++++++++++++++
 1 file changed, 163 insertions(+)
 create mode 100644 llvm/test/Transforms/IROutliner/outlining-special-state.ll

diff --git a/llvm/test/Transforms/IROutliner/outlining-special-state.ll b/llvm/test/Transforms/IROutliner/outlining-special-state.ll
new file mode 100644
index 0000000000000..9ceec51895351
--- /dev/null
+++ b/llvm/test/Transforms/IROutliner/outlining-special-state.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --include-generated-funcs
+; RUN: opt -S -passes=verify,iroutliner -ir-outlining-no-cost < %s | FileCheck %s
+
+declare void @foo();
+
+define void @atomicrmw_base(ptr %p) {
+entry:
+  %1 = atomicrmw add ptr %p, i32 1 acquire, align 8
+  call void @foo()
+  ret void
+}
+
+define void @atomicrmw_copy(ptr %p) {
+entry:
+  %1 = atomicrmw add ptr %p, i32 1 acquire, align 8
+  call void @foo()
+  ret void
+}
+
+define void @atomicrmw_wrong_type(ptr %p) {
+entry:
+  %1 = atomicrmw add ptr %p, i64 1 acquire, align 8
+  call void @foo()
+  ret void
+}
+
+define void @atomicrmw_wrong_align(ptr %p) {
+entry:
+  %1 = atomicrmw add ptr %p, i32 1 acquire, align 4
+  call void @foo()
+  ret void
+}
+
+define void @atomicrmw_wrong_volatile(ptr %p) {
+entry:
+  %1 = atomicrmw volatile add ptr %p, i32 1 acquire, align 8
+  call void @foo()
+  ret void
+}
+
+define void @cmpxchg_base(ptr %p) {
+entry:
+  %1 = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic, align 8
+  call void @foo()
+  ret void
+}
+
+define void @cmpxchg_copy(ptr %p) {
+entry:
+  %1 = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic, align 8
+  call void @foo()
+  ret void
+}
+
+define void @cmpxchg_wrong_type(ptr %p) {
+entry:
+  %1 = cmpxchg ptr %p, i64 0, i64 1 monotonic monotonic, align 8
+  call void @foo()
+  ret void
+}
+
+define void @cmpxchg_wrong_align(ptr %p) {
+entry:
+  %1 = cmpxchg ptr %p, i32 0, i32 1 monotonic monotonic, align 4
+  call void @foo()
+  ret void
+}
+
+define void @cmpxchg_wrong_volatile(ptr %p) {
+entry:
+  %1 = cmpxchg volatile ptr %p, i32 0, i32 1 monotonic monotonic, align 8
+  call void @foo()
+  ret void
+}
+
+
+; CHECK-LABEL: @atomicrmw_base(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[P:%.*]])
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @atomicrmw_copy(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @outlined_ir_func_1(ptr [[P:%.*]])
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @atomicrmw_wrong_type(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[P:%.*]], i64 1 acquire, align 8
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @atomicrmw_wrong_align(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[P:%.*]], i32 1 acquire, align 4
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @atomicrmw_wrong_volatile(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = atomicrmw volatile add ptr [[P:%.*]], i32 1 acquire, align 8
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @cmpxchg_base(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[P:%.*]])
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @cmpxchg_copy(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[P:%.*]])
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @cmpxchg_wrong_type(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg ptr [[P:%.*]], i64 0, i64 1 monotonic monotonic, align 8
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @cmpxchg_wrong_align(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg ptr [[P:%.*]], i32 0, i32 1 monotonic monotonic, align 4
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @cmpxchg_wrong_volatile(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg volatile ptr [[P:%.*]], i32 0, i32 1 monotonic monotonic, align 8
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @outlined_ir_func_0(
+; CHECK-NEXT: newFuncRoot:
+; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
+; CHECK: entry_to_outline:
+; CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[TMP0:%.*]], i32 0, i32 1 monotonic monotonic, align 8
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
+; CHECK: entry_after_outline.exitStub:
+; CHECK-NEXT: ret void
+;
+;
+; CHECK-LABEL: @outlined_ir_func_1(
+; CHECK-NEXT: newFuncRoot:
+; CHECK-NEXT: br label [[ENTRY_TO_OUTLINE:%.*]]
+; CHECK: entry_to_outline:
+; CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr [[TMP0:%.*]], i32 1 acquire, align 8
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
+; CHECK: entry_after_outline.exitStub:
+; CHECK-NEXT: ret void
+;

From e17d7ecfafe5fa525e25889ed788927c55fb25ae Mon Sep 17 00:00:00 2001
From: Ellis Hoag
Date: Fri, 19 Sep 2025 10:48:06 -0700
Subject: [PATCH 4/4] Remove new unittests

---
 .../Analysis/IRSimilarityIdentifierTest.cpp   | 162 +-----------------
 1 file changed, 3 insertions(+), 159 deletions(-)

diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index aa26a8e3469b4..fa451fab67549 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -19,14 +19,11 @@
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/SourceMgr.h"
-#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
 using namespace IRSimilarity;
 
-using testing::SizeIs;
-
 static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
                                               StringRef ModuleStr) {
   SMDiagnostic Err;
@@ -52,6 +49,9 @@ void getSimilarities(
   SimilarityCandidates = Identifier.findSimilarity(M);
 }
 
+// TODO: All these tests could probably become IR LIT tests like
+// IROutliner/outlining-special-state.ll
+
 // Checks that different opcodes are mapped to different values
 TEST(IRInstructionMapper, OpcodeDifferentiation) {
   StringRef ModuleString = R"(
@@ -733,162 +733,6 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
   ASSERT_TRUE(UnsignedVec[0] != UnsignedVec[1]);
 }
 
-// Checks that atomicrmw that have the different types are mapped to
-// different unsigned integers.
-TEST(IRInstructionMapper, AtomicRMWDifferentType) {
-  StringRef ModuleString = R"(
-                          define i32 @f(ptr %a, ptr %b) {
-                          bb0:
-                             %1 = atomicrmw add ptr %a, i32 1 acquire
-                             %2 = atomicrmw add ptr %b, i64 1 acquire
-                             ret i32 0
-                          })";
-  LLVMContext Context;
-  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
-
-  std::vector<IRInstructionData *> InstrList;
-  std::vector<unsigned> UnsignedVec;
-
-  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
-  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
-  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
-  getVectors(*M, Mapper, InstrList, UnsignedVec);
-
-  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
-  ASSERT_THAT(UnsignedVec, SizeIs(3));
-  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
-}
-
-// Checks that atomicrmw that have the different aligns are mapped to different
-// unsigned integers.
-TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
-  StringRef ModuleString = R"(
-                          define i32 @f(ptr %a, ptr %b) {
-                          bb0:
-                             %1 = atomicrmw add ptr %a, i32 1 acquire, align 4
-                             %2 = atomicrmw add ptr %b, i32 1 acquire, align 8
-                             ret i32 0
-                          })";
-  LLVMContext Context;
-  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
-
-  std::vector<IRInstructionData *> InstrList;
-  std::vector<unsigned> UnsignedVec;
-
-  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
-  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
-  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
-  getVectors(*M, Mapper, InstrList, UnsignedVec);
-
-  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
-  ASSERT_THAT(UnsignedVec, SizeIs(3));
-  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
-}
-
-// Checks that atomicrmw that have the different volatile settings are mapped to
-// different unsigned integers.
-TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
-  StringRef ModuleString = R"(
-                          define i32 @f(ptr %a, ptr %b) {
-                          bb0:
-                             %1 = atomicrmw volatile add ptr %a, i32 1 acquire
-                             %2 = atomicrmw add ptr %b, i32 1 acquire
-                             ret i32 0
-                          })";
-  LLVMContext Context;
-  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
-
-  std::vector<IRInstructionData *> InstrList;
-  std::vector<unsigned> UnsignedVec;
-
-  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
-  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
-  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
-  getVectors(*M, Mapper, InstrList, UnsignedVec);
-
-  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
-  ASSERT_THAT(UnsignedVec, SizeIs(3));
-  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
-}
-
-// Checks that cmpxchg that have the different types are mapped to
-// different unsigned integers.
-TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
-  StringRef ModuleString = R"(
-                          define i32 @f(ptr %a, ptr %b) {
-                          bb0:
-                             %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic
-                             %2 = cmpxchg ptr %b, i64 0, i64 1 monotonic monotonic
-                             ret i32 0
-                          })";
-  LLVMContext Context;
-  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
-
-  std::vector<IRInstructionData *> InstrList;
-  std::vector<unsigned> UnsignedVec;
-
-  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
-  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
-  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
-  getVectors(*M, Mapper, InstrList, UnsignedVec);
-
-  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
-  ASSERT_THAT(UnsignedVec, SizeIs(3));
-  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
-}
-
-// Checks that cmpxchg that have the different aligns are mapped to different
-// unsigned integers.
-TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
-  StringRef ModuleString = R"(
-                          define i32 @f(ptr %a, ptr %b) {
-                          bb0:
-                             %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic, align 4
-                             %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic, align 8
-                             ret i32 0
-                          })";
-  LLVMContext Context;
-  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
-
-  std::vector<IRInstructionData *> InstrList;
-  std::vector<unsigned> UnsignedVec;
-
-  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
-  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
-  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
-  getVectors(*M, Mapper, InstrList, UnsignedVec);
-
-  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
-  ASSERT_THAT(UnsignedVec, SizeIs(3));
-  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
-}
-
-// Checks that cmpxchg that have the different volatile settings are mapped to
-// different unsigned integers.
-TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
-  StringRef ModuleString = R"(
-                          define i32 @f(ptr %a, ptr %b) {
-                          bb0:
-                             %1 = cmpxchg volatile ptr %a, i32 0, i32 1 monotonic monotonic
-                             %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic
-                             ret i32 0
-                          })";
-  LLVMContext Context;
-  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
-
-  std::vector<IRInstructionData *> InstrList;
-  std::vector<unsigned> UnsignedVec;
-
-  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
-  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
-  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
-  getVectors(*M, Mapper, InstrList, UnsignedVec);
-
-  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
-  ASSERT_THAT(UnsignedVec, SizeIs(3));
-  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
-}
-
 // Checks that the branch is mapped to legal when the option is set.
 TEST(IRInstructionMapper, BranchLegal) {
   StringRef ModuleString = R"(