Skip to content

Conversation

thurstond
Copy link
Contributor

@thurstond thurstond commented Aug 22, 2025

Currently visitIntrinsicInst() is a long, partly unsorted list. This patch groups them into cross-platform, X86 SIMD, and Arm SIMD families, making the overall intent of visitIntrinsicInst() clearer:

  void visitIntrinsicInst(IntrinsicInst &I) {
    if (maybeHandleCrossPlatformIntrinsic(I))
      return;

    if (maybeHandleX86SIMDIntrinsic(I))
      return;

    if (maybeHandleArmSIMDIntrinsic(I))
      return;

    if (maybeHandleUnknownIntrinsic(I))
      return;

    visitInstruction(I);
  }

There is one disadvantage: the compiler will not tell us if the switch statements in the handlers have overlapping coverage.

Currently it's a long, partly unsorted list. This patch separates them
into helper functions, making the overall intent of visitIntrinsicInst()
clearer:

```
 void visitIntrinsicInst(IntrinsicInst &I) {
   if (!maybeHandleCrossPlatformIntrinsic(I))
     if (!maybeHandleX86SIMDIntrinsic(I))
       if (!maybeHandleArmSIMDIntrinsic(I))
         if (!maybeHandleUnknownIntrinsic(I))
           visitInstruction(I);
 }
```
@llvmbot
Copy link
Member

llvmbot commented Aug 22, 2025

@llvm/pr-subscribers-llvm-transforms

Author: Thurston Dang (thurstond)

Changes

Currently it's a long, partly unsorted list. This patch separates them into helper functions, making the overall intent of visitIntrinsicInst() clearer:

 void visitIntrinsicInst(IntrinsicInst &I) {
   if (!maybeHandleCrossPlatformIntrinsic(I))
     if (!maybeHandleX86SIMDIntrinsic(I))
       if (!maybeHandleArmSIMDIntrinsic(I))
         if (!maybeHandleUnknownIntrinsic(I))
           visitInstruction(I);
 }

There is one disadvantage: the compiler will not tell us if the switch statements in the handlers have overlapping coverage.


Full diff: https://github.com/llvm/llvm-project/pull/154878.diff

1 File Affected:

  • (modified) llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp (+80-49)
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 27292d1a66c30..28f47722e7ed3 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3263,7 +3263,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return true;
   }
 
-  /// Heuristically instrument unknown intrinsics.
+  /// Returns whether it was able to heuristically instrument unknown
+  /// intrinsics.
   ///
   /// The main purpose of this code is to do something reasonable with all
   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
@@ -3273,7 +3274,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ///
   /// We special-case intrinsics where this approach fails. See llvm.bswap
   /// handling as an example of that.
-  bool handleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
+  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
     unsigned NumArgOperands = I.arg_size();
     if (NumArgOperands == 0)
       return false;
@@ -3300,8 +3301,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return false;
   }
 
-  bool handleUnknownIntrinsic(IntrinsicInst &I) {
-    if (handleUnknownIntrinsicUnlogged(I)) {
+  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
+    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
       if (ClDumpHeuristicInstructions)
         dumpInst(I);
 
@@ -5262,7 +5263,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     handleShadowOr(I);
   }
 
-  void visitIntrinsicInst(IntrinsicInst &I) {
+  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
     switch (I.getIntrinsicID()) {
     case Intrinsic::uadd_with_overflow:
     case Intrinsic::sadd_with_overflow:
@@ -5342,6 +5343,32 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       handleVectorReduceWithStarterIntrinsic(I);
       break;
 
+    case Intrinsic::scmp:
+    case Intrinsic::ucmp: {
+      handleShadowOr(I);
+      break;
+    }
+
+    case Intrinsic::fshl:
+    case Intrinsic::fshr:
+      handleFunnelShift(I);
+      break;
+
+    case Intrinsic::is_constant:
+      // The result of llvm.is.constant() is always defined.
+      setShadow(&I, getCleanShadow(&I));
+      setOrigin(&I, getCleanOrigin());
+      break;
+
+    default:
+      return false;
+    }
+
+    return true;
+  }
+
+  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
+    switch (I.getIntrinsicID()) {
     case Intrinsic::x86_sse_stmxcsr:
       handleStmxcsr(I);
       break;
@@ -5392,6 +5419,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       break;
     }
 
+    // Convert Packed Single Precision Floating-Point Values
+    //   to Packed Signed Doubleword Integer Values
+    //
+    // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
+    //                (<16 x float>, <16 x i32>, i16, i32)
+    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
+      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
+      break;
+
     // Convert Packed Double Precision Floating-Point Values
     //   to Packed Single Precision Floating-Point Values
     case Intrinsic::x86_sse2_cvtpd2ps:
@@ -5492,23 +5528,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     case Intrinsic::x86_mmx_psrli_q:
     case Intrinsic::x86_mmx_psrai_w:
     case Intrinsic::x86_mmx_psrai_d:
-    case Intrinsic::aarch64_neon_rshrn:
-    case Intrinsic::aarch64_neon_sqrshl:
-    case Intrinsic::aarch64_neon_sqrshrn:
-    case Intrinsic::aarch64_neon_sqrshrun:
-    case Intrinsic::aarch64_neon_sqshl:
-    case Intrinsic::aarch64_neon_sqshlu:
-    case Intrinsic::aarch64_neon_sqshrn:
-    case Intrinsic::aarch64_neon_sqshrun:
-    case Intrinsic::aarch64_neon_srshl:
-    case Intrinsic::aarch64_neon_sshl:
-    case Intrinsic::aarch64_neon_uqrshl:
-    case Intrinsic::aarch64_neon_uqrshrn:
-    case Intrinsic::aarch64_neon_uqshl:
-    case Intrinsic::aarch64_neon_uqshrn:
-    case Intrinsic::aarch64_neon_urshl:
-    case Intrinsic::aarch64_neon_ushl:
-      // Not handled here: aarch64_neon_vsli (vector shift left and insert)
       handleVectorShiftIntrinsic(I, /* Variable */ false);
       break;
     case Intrinsic::x86_avx2_psllv_d:
@@ -5930,7 +5949,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     case Intrinsic::x86_avx512_max_pd_512: {
       // These AVX512 variants contain the rounding mode as a trailing flag.
       // Earlier variants do not have a trailing flag and are already handled
-      // by maybeHandleSimpleNomemIntrinsic(I, 0) via handleUnknownIntrinsic.
+      // by maybeHandleSimpleNomemIntrinsic(I, 0) via
+      // maybeHandleUnknownIntrinsic.
       [[maybe_unused]] bool Success =
           maybeHandleSimpleNomemIntrinsic(I, /*trailingFlags=*/1);
       assert(Success);
@@ -5988,15 +6008,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                                         /*trailingVerbatimArgs=*/1);
       break;
 
-    // Convert Packed Single Precision Floating-Point Values
-    //   to Packed Signed Doubleword Integer Values
-    //
-    // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
-    //                (<16 x float>, <16 x i32>, i16, i32)
-    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
-      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
-      break;
-
     // AVX512 PMOV: Packed MOV, with truncation
     // Precisely handled by applying the same intrinsic to the shadow
     case Intrinsic::x86_avx512_mask_pmov_dw_512:
@@ -6074,15 +6085,33 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       handleAVXGF2P8Affine(I);
       break;
 
-    case Intrinsic::fshl:
-    case Intrinsic::fshr:
-      handleFunnelShift(I);
-      break;
+    default:
+      return false;
+    }
 
-    case Intrinsic::is_constant:
-      // The result of llvm.is.constant() is always defined.
-      setShadow(&I, getCleanShadow(&I));
-      setOrigin(&I, getCleanOrigin());
+    return true;
+  }
+
+  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
+    switch (I.getIntrinsicID()) {
+    case Intrinsic::aarch64_neon_rshrn:
+    case Intrinsic::aarch64_neon_sqrshl:
+    case Intrinsic::aarch64_neon_sqrshrn:
+    case Intrinsic::aarch64_neon_sqrshrun:
+    case Intrinsic::aarch64_neon_sqshl:
+    case Intrinsic::aarch64_neon_sqshlu:
+    case Intrinsic::aarch64_neon_sqshrn:
+    case Intrinsic::aarch64_neon_sqshrun:
+    case Intrinsic::aarch64_neon_srshl:
+    case Intrinsic::aarch64_neon_sshl:
+    case Intrinsic::aarch64_neon_uqrshl:
+    case Intrinsic::aarch64_neon_uqrshrn:
+    case Intrinsic::aarch64_neon_uqshl:
+    case Intrinsic::aarch64_neon_uqshrn:
+    case Intrinsic::aarch64_neon_urshl:
+    case Intrinsic::aarch64_neon_ushl:
+      // Not handled here: aarch64_neon_vsli (vector shift left and insert)
+      handleVectorShiftIntrinsic(I, /* Variable */ false);
       break;
 
     // TODO: handling max/min similarly to AND/OR may be more precise
@@ -6233,17 +6262,19 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       break;
     }
 
-    case Intrinsic::scmp:
-    case Intrinsic::ucmp: {
-      handleShadowOr(I);
-      break;
-    }
-
     default:
-      if (!handleUnknownIntrinsic(I))
-        visitInstruction(I);
-      break;
+      return false;
     }
+
+    return true;
+  }
+
+  void visitIntrinsicInst(IntrinsicInst &I) {
+    if (!maybeHandleCrossPlatformIntrinsic(I))
+      if (!maybeHandleX86SIMDIntrinsic(I))
+        if (!maybeHandleArmSIMDIntrinsic(I))
+          if (!maybeHandleUnknownIntrinsic(I))
+            visitInstruction(I);
   }
 
   void visitLibAtomicLoad(CallBase &CB) {

@llvmbot
Copy link
Member

llvmbot commented Aug 22, 2025

@llvm/pr-subscribers-compiler-rt-sanitizer

Author: Thurston Dang (thurstond)

Changes

Currently it's a long, partly unsorted list. This patch separates them into helper functions, making the overall intent of visitIntrinsicInst() clearer:

 void visitIntrinsicInst(IntrinsicInst &I) {
   if (!maybeHandleCrossPlatformIntrinsic(I))
     if (!maybeHandleX86SIMDIntrinsic(I))
       if (!maybeHandleArmSIMDIntrinsic(I))
         if (!maybeHandleUnknownIntrinsic(I))
           visitInstruction(I);
 }

There is one disadvantage: the compiler will not tell us if the switch statements in the handlers have overlapping coverage.


Full diff: https://github.com/llvm/llvm-project/pull/154878.diff

1 File Affected:

  • (modified) llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp (+80-49)
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 27292d1a66c30..28f47722e7ed3 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3263,7 +3263,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return true;
   }
 
-  /// Heuristically instrument unknown intrinsics.
+  /// Returns whether it was able to heuristically instrument unknown
+  /// intrinsics.
   ///
   /// The main purpose of this code is to do something reasonable with all
   /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
@@ -3273,7 +3274,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ///
   /// We special-case intrinsics where this approach fails. See llvm.bswap
   /// handling as an example of that.
-  bool handleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
+  bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
     unsigned NumArgOperands = I.arg_size();
     if (NumArgOperands == 0)
       return false;
@@ -3300,8 +3301,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     return false;
   }
 
-  bool handleUnknownIntrinsic(IntrinsicInst &I) {
-    if (handleUnknownIntrinsicUnlogged(I)) {
+  bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
+    if (maybeHandleUnknownIntrinsicUnlogged(I)) {
       if (ClDumpHeuristicInstructions)
         dumpInst(I);
 
@@ -5262,7 +5263,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     handleShadowOr(I);
   }
 
-  void visitIntrinsicInst(IntrinsicInst &I) {
+  bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
     switch (I.getIntrinsicID()) {
     case Intrinsic::uadd_with_overflow:
     case Intrinsic::sadd_with_overflow:
@@ -5342,6 +5343,32 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       handleVectorReduceWithStarterIntrinsic(I);
       break;
 
+    case Intrinsic::scmp:
+    case Intrinsic::ucmp: {
+      handleShadowOr(I);
+      break;
+    }
+
+    case Intrinsic::fshl:
+    case Intrinsic::fshr:
+      handleFunnelShift(I);
+      break;
+
+    case Intrinsic::is_constant:
+      // The result of llvm.is.constant() is always defined.
+      setShadow(&I, getCleanShadow(&I));
+      setOrigin(&I, getCleanOrigin());
+      break;
+
+    default:
+      return false;
+    }
+
+    return true;
+  }
+
+  bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
+    switch (I.getIntrinsicID()) {
     case Intrinsic::x86_sse_stmxcsr:
       handleStmxcsr(I);
       break;
@@ -5392,6 +5419,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       break;
     }
 
+    // Convert Packed Single Precision Floating-Point Values
+    //   to Packed Signed Doubleword Integer Values
+    //
+    // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
+    //                (<16 x float>, <16 x i32>, i16, i32)
+    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
+      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
+      break;
+
     // Convert Packed Double Precision Floating-Point Values
     //   to Packed Single Precision Floating-Point Values
     case Intrinsic::x86_sse2_cvtpd2ps:
@@ -5492,23 +5528,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     case Intrinsic::x86_mmx_psrli_q:
     case Intrinsic::x86_mmx_psrai_w:
     case Intrinsic::x86_mmx_psrai_d:
-    case Intrinsic::aarch64_neon_rshrn:
-    case Intrinsic::aarch64_neon_sqrshl:
-    case Intrinsic::aarch64_neon_sqrshrn:
-    case Intrinsic::aarch64_neon_sqrshrun:
-    case Intrinsic::aarch64_neon_sqshl:
-    case Intrinsic::aarch64_neon_sqshlu:
-    case Intrinsic::aarch64_neon_sqshrn:
-    case Intrinsic::aarch64_neon_sqshrun:
-    case Intrinsic::aarch64_neon_srshl:
-    case Intrinsic::aarch64_neon_sshl:
-    case Intrinsic::aarch64_neon_uqrshl:
-    case Intrinsic::aarch64_neon_uqrshrn:
-    case Intrinsic::aarch64_neon_uqshl:
-    case Intrinsic::aarch64_neon_uqshrn:
-    case Intrinsic::aarch64_neon_urshl:
-    case Intrinsic::aarch64_neon_ushl:
-      // Not handled here: aarch64_neon_vsli (vector shift left and insert)
       handleVectorShiftIntrinsic(I, /* Variable */ false);
       break;
     case Intrinsic::x86_avx2_psllv_d:
@@ -5930,7 +5949,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     case Intrinsic::x86_avx512_max_pd_512: {
       // These AVX512 variants contain the rounding mode as a trailing flag.
       // Earlier variants do not have a trailing flag and are already handled
-      // by maybeHandleSimpleNomemIntrinsic(I, 0) via handleUnknownIntrinsic.
+      // by maybeHandleSimpleNomemIntrinsic(I, 0) via
+      // maybeHandleUnknownIntrinsic.
       [[maybe_unused]] bool Success =
           maybeHandleSimpleNomemIntrinsic(I, /*trailingFlags=*/1);
       assert(Success);
@@ -5988,15 +6008,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                                         /*trailingVerbatimArgs=*/1);
       break;
 
-    // Convert Packed Single Precision Floating-Point Values
-    //   to Packed Signed Doubleword Integer Values
-    //
-    // <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
-    //                (<16 x float>, <16 x i32>, i16, i32)
-    case Intrinsic::x86_avx512_mask_cvtps2dq_512:
-      handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
-      break;
-
     // AVX512 PMOV: Packed MOV, with truncation
     // Precisely handled by applying the same intrinsic to the shadow
     case Intrinsic::x86_avx512_mask_pmov_dw_512:
@@ -6074,15 +6085,33 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       handleAVXGF2P8Affine(I);
       break;
 
-    case Intrinsic::fshl:
-    case Intrinsic::fshr:
-      handleFunnelShift(I);
-      break;
+    default:
+      return false;
+    }
 
-    case Intrinsic::is_constant:
-      // The result of llvm.is.constant() is always defined.
-      setShadow(&I, getCleanShadow(&I));
-      setOrigin(&I, getCleanOrigin());
+    return true;
+  }
+
+  bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
+    switch (I.getIntrinsicID()) {
+    case Intrinsic::aarch64_neon_rshrn:
+    case Intrinsic::aarch64_neon_sqrshl:
+    case Intrinsic::aarch64_neon_sqrshrn:
+    case Intrinsic::aarch64_neon_sqrshrun:
+    case Intrinsic::aarch64_neon_sqshl:
+    case Intrinsic::aarch64_neon_sqshlu:
+    case Intrinsic::aarch64_neon_sqshrn:
+    case Intrinsic::aarch64_neon_sqshrun:
+    case Intrinsic::aarch64_neon_srshl:
+    case Intrinsic::aarch64_neon_sshl:
+    case Intrinsic::aarch64_neon_uqrshl:
+    case Intrinsic::aarch64_neon_uqrshrn:
+    case Intrinsic::aarch64_neon_uqshl:
+    case Intrinsic::aarch64_neon_uqshrn:
+    case Intrinsic::aarch64_neon_urshl:
+    case Intrinsic::aarch64_neon_ushl:
+      // Not handled here: aarch64_neon_vsli (vector shift left and insert)
+      handleVectorShiftIntrinsic(I, /* Variable */ false);
       break;
 
     // TODO: handling max/min similarly to AND/OR may be more precise
@@ -6233,17 +6262,19 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       break;
     }
 
-    case Intrinsic::scmp:
-    case Intrinsic::ucmp: {
-      handleShadowOr(I);
-      break;
-    }
-
     default:
-      if (!handleUnknownIntrinsic(I))
-        visitInstruction(I);
-      break;
+      return false;
     }
+
+    return true;
+  }
+
+  void visitIntrinsicInst(IntrinsicInst &I) {
+    if (!maybeHandleCrossPlatformIntrinsic(I))
+      if (!maybeHandleX86SIMDIntrinsic(I))
+        if (!maybeHandleArmSIMDIntrinsic(I))
+          if (!maybeHandleUnknownIntrinsic(I))
+            visitInstruction(I);
   }
 
   void visitLibAtomicLoad(CallBase &CB) {


void visitIntrinsicInst(IntrinsicInst &I) {
if (!maybeHandleCrossPlatformIntrinsic(I))
if (!maybeHandleX86SIMDIntrinsic(I))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why not

if (maybeHandleCrossPlatformIntrinsic(I))
  return;
if (maybeHandleX86SIMDIntrinsic(I))
  return;
[...]

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done

@thurstond thurstond requested a review from fmayer August 23, 2025 01:17
@thurstond thurstond merged commit 5dafe66 into llvm:main Aug 25, 2025
9 checks passed
@llvm-ci
Copy link
Collaborator

llvm-ci commented Aug 25, 2025

LLVM Buildbot has detected a new failure on builder lldb-x86_64-debian running on lldb-x86_64-debian while building llvm at step 6 "test".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/162/builds/29680

Here is the relevant piece of the build log for the reference
Step 6 (test) failure: build (failure)
...
PASS: lldb-shell :: ObjectFile/PECOFF/settings-abi-i686.yaml (1632 of 3154)
PASS: lldb-shell :: Log/TestHandlers.test (1633 of 3154)
PASS: lldb-unit :: Core/./LLDBCoreTests/37/118 (1634 of 3154)
PASS: lldb-shell :: SymbolFile/DWARF/x86/no_unique_address-with-bitfields.cpp (1635 of 3154)
PASS: lldb-unit :: Core/./LLDBCoreTests/17/118 (1636 of 3154)
PASS: lldb-shell :: ObjectFile/Breakpad/breakpad-identification.test (1637 of 3154)
PASS: lldb-unit :: Core/./LLDBCoreTests/67/118 (1638 of 3154)
PASS: lldb-shell :: Commands/command-target-modules-lookup.test (1639 of 3154)
PASS: lldb-shell :: Breakpoint/condition-lang.test (1640 of 3154)
PASS: lldb-shell :: Diagnostics/TestDump.test (1641 of 3154)
FAIL: lldb-unit :: ProtocolServer/./ProtocolServerTests/5/7 (1642 of 3154)
******************** TEST 'lldb-unit :: ProtocolServer/./ProtocolServerTests/5/7' FAILED ********************
Script(shard):
--
GTEST_OUTPUT=json:/home/worker/2.0.1/lldb-x86_64-debian/build/tools/lldb/unittests/ProtocolServer/./ProtocolServerTests-lldb-unit-1906430-5-7.json GTEST_SHUFFLE=0 GTEST_TOTAL_SHARDS=7 GTEST_SHARD_INDEX=5 /home/worker/2.0.1/lldb-x86_64-debian/build/tools/lldb/unittests/ProtocolServer/./ProtocolServerTests
--

Script:
--
/home/worker/2.0.1/lldb-x86_64-debian/build/tools/lldb/unittests/ProtocolServer/./ProtocolServerTests --gtest_filter=ProtocolServerMCPTest.ToolsCallFail
--
unknown file: Failure

Unexpected mock function call - returning directly.
    Function call: Received(@0x7ffdee8f7e78 136-byte object <0B-00 00-00 00-00 00-00 90-7E 8F-EE FD-7F 00-00 00-00 00-00 00-00 00-00 00-67 65-5F 68-61 6E-64 00-65 72-2C 20-52 65-63 A5-80 FF-FF FF-FF FF-FF B8-7E 8F-EE FD-7F 00-00 0E-00 00-00 00-00 00-00 ... 22-66 61-69 6C-22 00-52 65-63 65-69 76-65 64-28 2A-65 78-70 65-63 74-65 64-5F 72-65 73-70 29-29 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00>)
Google Mock tried the following 1 expectation, but it didn't match:

/home/worker/2.0.1/lldb-x86_64-debian/llvm-project/lldb/unittests/ProtocolServer/ProtocolMCPServerTest.cpp:313: EXPECT_CALL(message_handler, Received(*expected_resp))...
  Expected arg #0: is equal to 136-byte object <0B-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 07-00 00-00 00-00 00-00 01-00 00-00 00-00 00-00 E0-EC 34-61 3D-56 00-00 ... 40-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 01-00 00-00 00-00 00-00>
           Actual: 136-byte object <0B-00 00-00 00-00 00-00 90-7E 8F-EE FD-7F 00-00 00-00 00-00 00-00 00-00 00-67 65-5F 68-61 6E-64 00-65 72-2C 20-52 65-63 A5-80 FF-FF FF-FF FF-FF B8-7E 8F-EE FD-7F 00-00 0E-00 00-00 00-00 00-00 ... 22-66 61-69 6C-22 00-52 65-63 65-69 76-65 64-28 2A-65 78-70 65-63 74-65 64-5F 72-65 73-70 29-29 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00>
         Expected: to be called once
           Actual: never called - unsatisfied and active

/home/worker/2.0.1/lldb-x86_64-debian/llvm-project/lldb/unittests/ProtocolServer/ProtocolMCPServerTest.cpp:313: Failure
Actual function call count doesn't match EXPECT_CALL(message_handler, Received(*expected_resp))...
         Expected: to be called once
           Actual: never called - unsatisfied and active


unknown file

Unexpected mock function call - returning directly.
    Function call: Received(@0x7ffdee8f7e78 136-byte object <0B-00 00-00 00-00 00-00 90-7E 8F-EE FD-7F 00-00 00-00 00-00 00-00 00-00 00-67 65-5F 68-61 6E-64 00-65 72-2C 20-52 65-63 A5-80 FF-FF FF-FF FF-FF B8-7E 8F-EE FD-7F 00-00 0E-00 00-00 00-00 00-00 ... 22-66 61-69 6C-22 00-52 65-63 65-69 76-65 64-28 2A-65 78-70 65-63 74-65 64-5F 72-65 73-70 29-29 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00>)
Google Mock tried the following 1 expectation, but it didn't match:

/home/worker/2.0.1/lldb-x86_64-debian/llvm-project/lldb/unittests/ProtocolServer/ProtocolMCPServerTest.cpp:313: EXPECT_CALL(message_handler, Received(*expected_resp))...
  Expected arg #0: is equal to 136-byte object <0B-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 07-00 00-00 00-00 00-00 01-00 00-00 00-00 00-00 E0-EC 34-61 3D-56 00-00 ... 40-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 01-00 00-00 00-00 00-00>
           Actual: 136-byte object <0B-00 00-00 00-00 00-00 90-7E 8F-EE FD-7F 00-00 00-00 00-00 00-00 00-00 00-67 65-5F 68-61 6E-64 00-65 72-2C 20-52 65-63 A5-80 FF-FF FF-FF FF-FF B8-7E 8F-EE FD-7F 00-00 0E-00 00-00 00-00 00-00 ... 22-66 61-69 6C-22 00-52 65-63 65-69 76-65 64-28 2A-65 78-70 65-63 74-65 64-5F 72-65 73-70 29-29 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00>
         Expected: to be called once

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Projects
None yet
Development

Successfully merging this pull request may close these issues.

4 participants