-
Notifications
You must be signed in to change notification settings - Fork 14.9k
[msan][NFCI] Refactor visitIntrinsicInst() into instruction families #154878
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
Currently visitIntrinsicInst() is a long, partly unsorted list of cases. This patch separates them into helper functions, making the overall intent of visitIntrinsicInst() clearer: ``` void visitIntrinsicInst(IntrinsicInst &I) { if (!maybeHandleCrossPlatformIntrinsic(I)) if (!maybeHandleX86SIMDIntrinsic(I)) if (!maybeHandleArmSIMDIntrinsic(I)) if (!maybeHandleUnknownIntrinsic(I)) visitInstruction(I); } ```
@llvm/pr-subscribers-llvm-transforms Author: Thurston Dang (thurstond) ChangesCurrently it's a long, partly unsorted list. This patch separates them into helper functions, making the overall intent of visitIntrinsicInst() clearer:
There is one disadvantage: the compiler will not tell us if the switch statements in the handlers have overlapping coverage. Full diff: https://github.com/llvm/llvm-project/pull/154878.diff (1 file affected).
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 27292d1a66c30..28f47722e7ed3 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3263,7 +3263,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return true;
}
- /// Heuristically instrument unknown intrinsics.
+ /// Returns whether it was able to heuristically instrument unknown
+ /// intrinsics.
///
/// The main purpose of this code is to do something reasonable with all
/// random intrinsics we might encounter, most importantly - SIMD intrinsics.
@@ -3273,7 +3274,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
///
/// We special-case intrinsics where this approach fails. See llvm.bswap
/// handling as an example of that.
- bool handleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
+ bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
unsigned NumArgOperands = I.arg_size();
if (NumArgOperands == 0)
return false;
@@ -3300,8 +3301,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return false;
}
- bool handleUnknownIntrinsic(IntrinsicInst &I) {
- if (handleUnknownIntrinsicUnlogged(I)) {
+ bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
+ if (maybeHandleUnknownIntrinsicUnlogged(I)) {
if (ClDumpHeuristicInstructions)
dumpInst(I);
@@ -5262,7 +5263,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleShadowOr(I);
}
- void visitIntrinsicInst(IntrinsicInst &I) {
+ bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
@@ -5342,6 +5343,32 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleVectorReduceWithStarterIntrinsic(I);
break;
+ case Intrinsic::scmp:
+ case Intrinsic::ucmp: {
+ handleShadowOr(I);
+ break;
+ }
+
+ case Intrinsic::fshl:
+ case Intrinsic::fshr:
+ handleFunnelShift(I);
+ break;
+
+ case Intrinsic::is_constant:
+ // The result of llvm.is.constant() is always defined.
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+ }
+
+ bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
+ switch (I.getIntrinsicID()) {
case Intrinsic::x86_sse_stmxcsr:
handleStmxcsr(I);
break;
@@ -5392,6 +5419,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
break;
}
+ // Convert Packed Single Precision Floating-Point Values
+ // to Packed Signed Doubleword Integer Values
+ //
+ // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
+ // (<16 x float>, <16 x i32>, i16, i32)
+ case Intrinsic::x86_avx512_mask_cvtps2dq_512:
+ handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
+ break;
+
// Convert Packed Double Precision Floating-Point Values
// to Packed Single Precision Floating-Point Values
case Intrinsic::x86_sse2_cvtpd2ps:
@@ -5492,23 +5528,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psrai_w:
case Intrinsic::x86_mmx_psrai_d:
- case Intrinsic::aarch64_neon_rshrn:
- case Intrinsic::aarch64_neon_sqrshl:
- case Intrinsic::aarch64_neon_sqrshrn:
- case Intrinsic::aarch64_neon_sqrshrun:
- case Intrinsic::aarch64_neon_sqshl:
- case Intrinsic::aarch64_neon_sqshlu:
- case Intrinsic::aarch64_neon_sqshrn:
- case Intrinsic::aarch64_neon_sqshrun:
- case Intrinsic::aarch64_neon_srshl:
- case Intrinsic::aarch64_neon_sshl:
- case Intrinsic::aarch64_neon_uqrshl:
- case Intrinsic::aarch64_neon_uqrshrn:
- case Intrinsic::aarch64_neon_uqshl:
- case Intrinsic::aarch64_neon_uqshrn:
- case Intrinsic::aarch64_neon_urshl:
- case Intrinsic::aarch64_neon_ushl:
- // Not handled here: aarch64_neon_vsli (vector shift left and insert)
handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
case Intrinsic::x86_avx2_psllv_d:
@@ -5930,7 +5949,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case Intrinsic::x86_avx512_max_pd_512: {
// These AVX512 variants contain the rounding mode as a trailing flag.
// Earlier variants do not have a trailing flag and are already handled
- // by maybeHandleSimpleNomemIntrinsic(I, 0) via handleUnknownIntrinsic.
+ // by maybeHandleSimpleNomemIntrinsic(I, 0) via
+ // maybeHandleUnknownIntrinsic.
[[maybe_unused]] bool Success =
maybeHandleSimpleNomemIntrinsic(I, /*trailingFlags=*/1);
assert(Success);
@@ -5988,15 +6008,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/*trailingVerbatimArgs=*/1);
break;
- // Convert Packed Single Precision Floating-Point Values
- // to Packed Signed Doubleword Integer Values
- //
- // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
- // (<16 x float>, <16 x i32>, i16, i32)
- case Intrinsic::x86_avx512_mask_cvtps2dq_512:
- handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
- break;
-
// AVX512 PMOV: Packed MOV, with truncation
// Precisely handled by applying the same intrinsic to the shadow
case Intrinsic::x86_avx512_mask_pmov_dw_512:
@@ -6074,15 +6085,33 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleAVXGF2P8Affine(I);
break;
- case Intrinsic::fshl:
- case Intrinsic::fshr:
- handleFunnelShift(I);
- break;
+ default:
+ return false;
+ }
- case Intrinsic::is_constant:
- // The result of llvm.is.constant() is always defined.
- setShadow(&I, getCleanShadow(&I));
- setOrigin(&I, getCleanOrigin());
+ return true;
+ }
+
+ bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
+ switch (I.getIntrinsicID()) {
+ case Intrinsic::aarch64_neon_rshrn:
+ case Intrinsic::aarch64_neon_sqrshl:
+ case Intrinsic::aarch64_neon_sqrshrn:
+ case Intrinsic::aarch64_neon_sqrshrun:
+ case Intrinsic::aarch64_neon_sqshl:
+ case Intrinsic::aarch64_neon_sqshlu:
+ case Intrinsic::aarch64_neon_sqshrn:
+ case Intrinsic::aarch64_neon_sqshrun:
+ case Intrinsic::aarch64_neon_srshl:
+ case Intrinsic::aarch64_neon_sshl:
+ case Intrinsic::aarch64_neon_uqrshl:
+ case Intrinsic::aarch64_neon_uqrshrn:
+ case Intrinsic::aarch64_neon_uqshl:
+ case Intrinsic::aarch64_neon_uqshrn:
+ case Intrinsic::aarch64_neon_urshl:
+ case Intrinsic::aarch64_neon_ushl:
+ // Not handled here: aarch64_neon_vsli (vector shift left and insert)
+ handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
// TODO: handling max/min similarly to AND/OR may be more precise
@@ -6233,17 +6262,19 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
break;
}
- case Intrinsic::scmp:
- case Intrinsic::ucmp: {
- handleShadowOr(I);
- break;
- }
-
default:
- if (!handleUnknownIntrinsic(I))
- visitInstruction(I);
- break;
+ return false;
}
+
+ return true;
+ }
+
+ void visitIntrinsicInst(IntrinsicInst &I) {
+ if (!maybeHandleCrossPlatformIntrinsic(I))
+ if (!maybeHandleX86SIMDIntrinsic(I))
+ if (!maybeHandleArmSIMDIntrinsic(I))
+ if (!maybeHandleUnknownIntrinsic(I))
+ visitInstruction(I);
}
void visitLibAtomicLoad(CallBase &CB) {
|
@llvm/pr-subscribers-compiler-rt-sanitizer Author: Thurston Dang (thurstond) ChangesCurrently it's a long, partly unsorted list. This patch separates them into helper functions, making the overall intent of visitIntrinsicInst() clearer:
There is one disadvantage: the compiler will not tell us if the switch statements in the handlers have overlapping coverage. Full diff: https://github.com/llvm/llvm-project/pull/154878.diff (1 file affected).
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 27292d1a66c30..28f47722e7ed3 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3263,7 +3263,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return true;
}
- /// Heuristically instrument unknown intrinsics.
+ /// Returns whether it was able to heuristically instrument unknown
+ /// intrinsics.
///
/// The main purpose of this code is to do something reasonable with all
/// random intrinsics we might encounter, most importantly - SIMD intrinsics.
@@ -3273,7 +3274,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
///
/// We special-case intrinsics where this approach fails. See llvm.bswap
/// handling as an example of that.
- bool handleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
+ bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
unsigned NumArgOperands = I.arg_size();
if (NumArgOperands == 0)
return false;
@@ -3300,8 +3301,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
return false;
}
- bool handleUnknownIntrinsic(IntrinsicInst &I) {
- if (handleUnknownIntrinsicUnlogged(I)) {
+ bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
+ if (maybeHandleUnknownIntrinsicUnlogged(I)) {
if (ClDumpHeuristicInstructions)
dumpInst(I);
@@ -5262,7 +5263,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleShadowOr(I);
}
- void visitIntrinsicInst(IntrinsicInst &I) {
+ bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
switch (I.getIntrinsicID()) {
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
@@ -5342,6 +5343,32 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleVectorReduceWithStarterIntrinsic(I);
break;
+ case Intrinsic::scmp:
+ case Intrinsic::ucmp: {
+ handleShadowOr(I);
+ break;
+ }
+
+ case Intrinsic::fshl:
+ case Intrinsic::fshr:
+ handleFunnelShift(I);
+ break;
+
+ case Intrinsic::is_constant:
+ // The result of llvm.is.constant() is always defined.
+ setShadow(&I, getCleanShadow(&I));
+ setOrigin(&I, getCleanOrigin());
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+ }
+
+ bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
+ switch (I.getIntrinsicID()) {
case Intrinsic::x86_sse_stmxcsr:
handleStmxcsr(I);
break;
@@ -5392,6 +5419,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
break;
}
+ // Convert Packed Single Precision Floating-Point Values
+ // to Packed Signed Doubleword Integer Values
+ //
+ // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
+ // (<16 x float>, <16 x i32>, i16, i32)
+ case Intrinsic::x86_avx512_mask_cvtps2dq_512:
+ handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
+ break;
+
// Convert Packed Double Precision Floating-Point Values
// to Packed Single Precision Floating-Point Values
case Intrinsic::x86_sse2_cvtpd2ps:
@@ -5492,23 +5528,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psrai_w:
case Intrinsic::x86_mmx_psrai_d:
- case Intrinsic::aarch64_neon_rshrn:
- case Intrinsic::aarch64_neon_sqrshl:
- case Intrinsic::aarch64_neon_sqrshrn:
- case Intrinsic::aarch64_neon_sqrshrun:
- case Intrinsic::aarch64_neon_sqshl:
- case Intrinsic::aarch64_neon_sqshlu:
- case Intrinsic::aarch64_neon_sqshrn:
- case Intrinsic::aarch64_neon_sqshrun:
- case Intrinsic::aarch64_neon_srshl:
- case Intrinsic::aarch64_neon_sshl:
- case Intrinsic::aarch64_neon_uqrshl:
- case Intrinsic::aarch64_neon_uqrshrn:
- case Intrinsic::aarch64_neon_uqshl:
- case Intrinsic::aarch64_neon_uqshrn:
- case Intrinsic::aarch64_neon_urshl:
- case Intrinsic::aarch64_neon_ushl:
- // Not handled here: aarch64_neon_vsli (vector shift left and insert)
handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
case Intrinsic::x86_avx2_psllv_d:
@@ -5930,7 +5949,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case Intrinsic::x86_avx512_max_pd_512: {
// These AVX512 variants contain the rounding mode as a trailing flag.
// Earlier variants do not have a trailing flag and are already handled
- // by maybeHandleSimpleNomemIntrinsic(I, 0) via handleUnknownIntrinsic.
+ // by maybeHandleSimpleNomemIntrinsic(I, 0) via
+ // maybeHandleUnknownIntrinsic.
[[maybe_unused]] bool Success =
maybeHandleSimpleNomemIntrinsic(I, /*trailingFlags=*/1);
assert(Success);
@@ -5988,15 +6008,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/*trailingVerbatimArgs=*/1);
break;
- // Convert Packed Single Precision Floating-Point Values
- // to Packed Signed Doubleword Integer Values
- //
- // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
- // (<16 x float>, <16 x i32>, i16, i32)
- case Intrinsic::x86_avx512_mask_cvtps2dq_512:
- handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
- break;
-
// AVX512 PMOV: Packed MOV, with truncation
// Precisely handled by applying the same intrinsic to the shadow
case Intrinsic::x86_avx512_mask_pmov_dw_512:
@@ -6074,15 +6085,33 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
handleAVXGF2P8Affine(I);
break;
- case Intrinsic::fshl:
- case Intrinsic::fshr:
- handleFunnelShift(I);
- break;
+ default:
+ return false;
+ }
- case Intrinsic::is_constant:
- // The result of llvm.is.constant() is always defined.
- setShadow(&I, getCleanShadow(&I));
- setOrigin(&I, getCleanOrigin());
+ return true;
+ }
+
+ bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
+ switch (I.getIntrinsicID()) {
+ case Intrinsic::aarch64_neon_rshrn:
+ case Intrinsic::aarch64_neon_sqrshl:
+ case Intrinsic::aarch64_neon_sqrshrn:
+ case Intrinsic::aarch64_neon_sqrshrun:
+ case Intrinsic::aarch64_neon_sqshl:
+ case Intrinsic::aarch64_neon_sqshlu:
+ case Intrinsic::aarch64_neon_sqshrn:
+ case Intrinsic::aarch64_neon_sqshrun:
+ case Intrinsic::aarch64_neon_srshl:
+ case Intrinsic::aarch64_neon_sshl:
+ case Intrinsic::aarch64_neon_uqrshl:
+ case Intrinsic::aarch64_neon_uqrshrn:
+ case Intrinsic::aarch64_neon_uqshl:
+ case Intrinsic::aarch64_neon_uqshrn:
+ case Intrinsic::aarch64_neon_urshl:
+ case Intrinsic::aarch64_neon_ushl:
+ // Not handled here: aarch64_neon_vsli (vector shift left and insert)
+ handleVectorShiftIntrinsic(I, /* Variable */ false);
break;
// TODO: handling max/min similarly to AND/OR may be more precise
@@ -6233,17 +6262,19 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
break;
}
- case Intrinsic::scmp:
- case Intrinsic::ucmp: {
- handleShadowOr(I);
- break;
- }
-
default:
- if (!handleUnknownIntrinsic(I))
- visitInstruction(I);
- break;
+ return false;
}
+
+ return true;
+ }
+
+ void visitIntrinsicInst(IntrinsicInst &I) {
+ if (!maybeHandleCrossPlatformIntrinsic(I))
+ if (!maybeHandleX86SIMDIntrinsic(I))
+ if (!maybeHandleArmSIMDIntrinsic(I))
+ if (!maybeHandleUnknownIntrinsic(I))
+ visitInstruction(I);
}
void visitLibAtomicLoad(CallBase &CB) {
|
void visitIntrinsicInst(IntrinsicInst &I) {
  if (!maybeHandleCrossPlatformIntrinsic(I))
    if (!maybeHandleX86SIMDIntrinsic(I))
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
why not
if (maybeHandleCrossPlatformIntrinsic(I))
return;
if (maybeHandleX86SIMDIntrinsic(I))
return;
[...]
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Done
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/162/builds/29680. Here is the relevant piece of the build log for reference:
|
Currently visitIntrinsicInst() is a long, partly unsorted list. This patch groups them into cross-platform, X86 SIMD, and Arm SIMD families, making the overall intent of visitIntrinsicInst() clearer:
There is one disadvantage: the compiler will not tell us if the switch statements in the handlers have overlapping coverage.