diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 5e87b5ff941ad..6010e2a99cf01 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -864,7 +864,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
 bool Instruction::hasSameSpecialState(const Instruction *I2,
                                       bool IgnoreAlignment,
                                       bool IntersectAttrs) const {
-  auto I1 = this;
+  const auto *I1 = this;
   assert(I1->getOpcode() == I2->getOpcode() &&
          "Can not compare special state of different instructions");
@@ -917,6 +917,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
            FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
     return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+           (CXI->getAlign() == cast<AtomicCmpXchgInst>(I2)->getAlign() ||
+            IgnoreAlignment) &&
            CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
            CXI->getSuccessOrdering() ==
                cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
@@ -927,6 +929,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
     return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
            RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+           (RMWI->getAlign() == cast<AtomicRMWInst>(I2)->getAlign() ||
+            IgnoreAlignment) &&
            RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
            RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
   if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 03009d53d63f4..aa26a8e3469b4 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -19,11 +19,14 @@
 #include "llvm/Support/Allocator.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/SourceMgr.h"
+#include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
 using namespace IRSimilarity;
 
+using testing::SizeIs;
+
 static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
                                               StringRef ModuleStr) {
   SMDiagnostic Err;
@@ -730,6 +733,162 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
   ASSERT_TRUE(UnsignedVec[0] != UnsignedVec[1]);
 }
 
+// Checks that atomicrmw that have the different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentType) {
+  StringRef ModuleString = R"(
+  define i32 @f(ptr %a, ptr %b) {
+  bb0:
+    %1 = atomicrmw add ptr %a, i32 1 acquire
+    %2 = atomicrmw add ptr %b, i64 1 acquire
+    ret i32 0
+  })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw that have the different aligns are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
+  StringRef ModuleString = R"(
+  define i32 @f(ptr %a, ptr %b) {
+  bb0:
+    %1 = atomicrmw add ptr %a, i32 1 acquire, align 4
+    %2 = atomicrmw add ptr %b, i32 1 acquire, align 8
+    ret i32 0
+  })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw that have the different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
+  StringRef ModuleString = R"(
+  define i32 @f(ptr %a, ptr %b) {
+  bb0:
+    %1 = atomicrmw volatile add ptr %a, i32 1 acquire
+    %2 = atomicrmw add ptr %b, i32 1 acquire
+    ret i32 0
+  })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
+  StringRef ModuleString = R"(
+  define i32 @f(ptr %a, ptr %b) {
+  bb0:
+    %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic
+    %2 = cmpxchg ptr %b, i64 0, i64 1 monotonic monotonic
+    ret i32 0
+  })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different aligns are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
+  StringRef ModuleString = R"(
+  define i32 @f(ptr %a, ptr %b) {
+  bb0:
+    %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic, align 4
+    %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic, align 8
+    ret i32 0
+  })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
+  StringRef ModuleString = R"(
+  define i32 @f(ptr %a, ptr %b) {
+  bb0:
+    %1 = cmpxchg volatile ptr %a, i32 0, i32 1 monotonic monotonic
+    %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic
+    ret i32 0
+  })";
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+  std::vector<IRInstructionData *> InstrList;
+  std::vector<unsigned> UnsignedVec;
+
+  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+  SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+  IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+  getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+  ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+  ASSERT_THAT(UnsignedVec, SizeIs(3));
+  EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
 // Checks that the branch is mapped to legal when the option is set.
 TEST(IRInstructionMapper, BranchLegal) {
   StringRef ModuleString = R"(