-
Notifications
You must be signed in to change notification settings - Fork 14.9k
[IR] Check identical alignment for atomic instructions #155349
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Conversation
@llvm/pr-subscribers-llvm-analysis @llvm/pr-subscribers-llvm-ir Author: Ellis Hoag (ellishg) — Changes: I noticed that `hasSameSpecialState()` checks alignment for `load`/`store` instructions, but not for `cmpxchg` or `atomicrmw`, which I assume is a bug. Full diff: https://github.com/llvm/llvm-project/pull/155349.diff — 2 Files Affected:
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 5e87b5ff941ad..6010e2a99cf01 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -864,7 +864,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
bool Instruction::hasSameSpecialState(const Instruction *I2,
bool IgnoreAlignment,
bool IntersectAttrs) const {
- auto I1 = this;
+ const auto *I1 = this;
assert(I1->getOpcode() == I2->getOpcode() &&
"Can not compare special state of different instructions");
@@ -917,6 +917,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+ (CXI->getAlign() == cast<AtomicCmpXchgInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
CXI->getSuccessOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
@@ -927,6 +929,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+ (RMWI->getAlign() == cast<AtomicRMWInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 03009d53d63f4..067fec85e182b 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -19,11 +19,14 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SourceMgr.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace llvm;
using namespace IRSimilarity;
+using testing::SizeIs;
+
static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
StringRef ModuleStr) {
SMDiagnostic Err;
@@ -730,6 +733,162 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
ASSERT_TRUE(UnsignedVec[0] != UnsignedVec[1]);
}
+// Checks that atomicrmw that have the different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentType) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i64* %b) {
+ bb0:
+ %1 = atomicrmw add i32* %a, i32 1 acquire
+ %2 = atomicrmw add i64* %b, i64 1 acquire
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw that have the different aligns are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = atomicrmw add i32* %a, i32 1 acquire, align 4
+ %2 = atomicrmw add i32* %b, i32 1 acquire, align 8
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw that have the different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = atomicrmw volatile add i32* %a, i32 1 acquire
+ %2 = atomicrmw add i32* %b, i32 1 acquire
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i64* %b) {
+ bb0:
+ %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg i64* %b, i64 0, i64 1 monotonic monotonic
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different aligns are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, align 4
+ %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic, align 8
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg that have the different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = cmpxchg volatile i32* %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
// Checks that the branch is mapped to legal when the option is set.
TEST(IRInstructionMapper, BranchLegal) {
StringRef ModuleString = R"(
Review comment on the added test code:

> ```
> // different unsigned integers.
> TEST(IRInstructionMapper, AtomicRMWDifferentType) {
>   StringRef ModuleString = R"(
>   define i32 @f(i32* %a, i64* %b) {
> ```

Suggested change:

```diff
-  define i32 @f(i32* %a, i64* %b) {
+  define i32 @f(ptr %a, ptr %b) {
```

etc. (use opaque `ptr` instead of typed pointers throughout the test IR)

(This comment can be hidden; choosing a reason for hiding it will display that reason to describe the comment to others. Learn more.)
I noticed that `hasSameSpecialState()` checks alignment for `load`/`store` instructions, but not for `cmpxchg` or `atomicrmw`, which I assume is a bug. It looks like alignment for these instructions was added in 74c7237.