diff --git a/clang/cmake/modules/AddClang.cmake b/clang/cmake/modules/AddClang.cmake
index 75b0080f67156..96ac1dc9a86f9 100644
--- a/clang/cmake/modules/AddClang.cmake
+++ b/clang/cmake/modules/AddClang.cmake
@@ -169,7 +169,7 @@ macro(add_clang_tool name)
     get_target_export_arg(${name} Clang export_to_clangtargets)
     install(TARGETS ${name}
       ${export_to_clangtargets}
-      RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+      RUNTIME DESTINATION "${LLVM_TOOLS_INSTALL_DIR}"
       COMPONENT ${name})
 
     if(NOT LLVM_ENABLE_IDE)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index 6d61d276d77e3..47101258eb553 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -220,7 +220,7 @@ namespace __sanitizer {
 unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
 
 unsigned ucontext_t_sz(void *ctx) {
-#  if SANITIZER_GLIBC && SANITIZER_X64
+#  if SANITIZER_GLIBC && SANITIZER_X64 && __GLIBC_PREREQ (2, 27)
   // Added in Linux kernel 3.4.0, merged to glibc in 2.16
 #    ifndef FP_XSTATE_MAGIC1
 #      define FP_XSTATE_MAGIC1 0x46505853U
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
index 252979f1c2baa..e2bab23dae666 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
@@ -17,6 +17,10 @@
 #include "sanitizer_flags.h"
 #include "sanitizer_platform_interceptors.h"
 
+#if !defined(__APPLE__)
+#include <malloc.h>
+#endif
+
 namespace __sanitizer {
 
 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
@@ -140,6 +144,8 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
     tls_size = __sanitizer_get_allocated_size(start);
     VReport(2, "__tls_get_addr: glibc >=2.25 suspected; tls={0x%zx,0x%zx}\n",
             tls_beg, tls_size);
+  } else if (uptr size = malloc_usable_size((void *)tls_beg)) {
+    tls_size = size;
   } else {
     VReport(2, "__tls_get_addr: Can't guess glibc version\n");
     // This may happen inside the DTOR of main thread, so just ignore it.
diff --git a/compiler-rt/test/msan/Linux/signal_mcontext.cpp b/compiler-rt/test/msan/Linux/signal_mcontext.cpp
index b49451fbb730b..11ef74e7462bb 100644
--- a/compiler-rt/test/msan/Linux/signal_mcontext.cpp
+++ b/compiler-rt/test/msan/Linux/signal_mcontext.cpp
@@ -10,7 +10,7 @@
 
 void handler(int sig, siginfo_t *info, void *uctx) {
   __msan_check_mem_is_initialized(uctx, sizeof(ucontext_t));
-#if defined(__GLIBC__) && defined(__x86_64__)
+#if defined(__GLIBC__) && defined(__x86_64__) && __GLIBC_PREREQ(2, 27)
   auto *mctx = &static_cast<ucontext_t *>(uctx)->uc_mcontext;
   if (auto *fpregs = mctx->fpregs) {
     // The member names differ across header versions, but the actual layout
diff --git a/libunwind/src/CompactUnwinder.hpp b/libunwind/src/CompactUnwinder.hpp
index a7a8a153d86a4..ac8837aa37ec5 100644
--- a/libunwind/src/CompactUnwinder.hpp
+++ b/libunwind/src/CompactUnwinder.hpp
@@ -311,6 +311,50 @@ int CompactUnwinder_x86_64<A>::stepWithCompactEncodingRBPFrame(
   uint32_t savedRegistersLocations =
       EXTRACT_BITS(compactEncoding, UNWIND_X86_64_RBP_FRAME_REGISTERS);
 
+  // If we have not stored EBP yet
+  if (functionStart == registers.getIP()) {
+    uint64_t rsp = registers.getSP();
+    // old esp is ebp less return address
+    registers.setSP(rsp+8);
+    // pop return address into eip
+    registers.setIP(addressSpace.get64(rsp));
+
+    return UNW_STEP_SUCCESS;
+  } else if (functionStart + 1 == registers.getIP()) {
+    uint64_t rsp = registers.getSP();
+    // old esp is ebp less return address
+    registers.setSP(rsp + 16);
+    // pop return address into eip
+    registers.setIP(addressSpace.get64(rsp + 8));
+
+    return UNW_STEP_SUCCESS;
+  }
+
+  // If we're about to return, we've already popped the base pointer
+  uint8_t b = addressSpace.get8(registers.getIP());
+
+  // This is a hack to detect VZEROUPPER but in between popq rbp and ret
+  // It's not pretty but it works
+  if (b == 0xC5) {
+    if ((b = addressSpace.get8(registers.getIP() + 1)) == 0xF8 &&
+        (b = addressSpace.get8(registers.getIP() + 2)) == 0x77)
+      b = addressSpace.get8(registers.getIP() + 3);
+    else
+      goto skip_ret;
+  }
+
+  if (b == 0xC3 || b == 0xCB || b == 0xC2 || b == 0xCA) {
+    uint64_t rbp = registers.getSP();
+    // old esp is ebp less return address
+    registers.setSP(rbp + 16);
+    // pop return address into eip
+    registers.setIP(addressSpace.get64(rbp + 8));
+
+    return UNW_STEP_SUCCESS;
+  }
+
+  skip_ret:
+
   uint64_t savedRegisters = registers.getRBP() - 8 * savedRegistersOffset;
   for (int i = 0; i < 5; ++i) {
     switch (savedRegistersLocations & 0x7) {
@@ -431,6 +475,118 @@ int CompactUnwinder_x86_64<A>::stepWithCompactEncodingFrameless(
       }
     }
   }
+
+  // Note that the order of these registers is so that
+  // registersSaved[0] is the one that will be pushed onto the stack last.
+  // Thus, if we want to walk this from the top, we need to go in reverse.
+  assert(regCount <= 6);
+
+  // check whether we are still in the prologue
+  uint64_t curAddr = functionStart;
+  if (regCount > 0) {
+    for (int8_t i = (int8_t)(regCount) - 1; i >= 0; --i) {
+      if (registers.getIP() == curAddr) {
+        // None of the registers have been modified yet, so we don't need to reload them
+        framelessUnwind(addressSpace, registers.getSP() + 8 * (regCount - (uint64_t)(i + 1)), registers);
+        return UNW_STEP_SUCCESS;
+      } else {
+        assert(curAddr < registers.getIP());
+      }
+
+
+      // pushq %rbp and pushq %rbx is 1 byte. Everything else 2
+      if ((UNWIND_X86_64_REG_RBP == registersSaved[i]) ||
+          (UNWIND_X86_64_REG_RBX == registersSaved[i]))
+        curAddr += 1;
+      else
+        curAddr += 2;
+    }
+  }
+  if (registers.getIP() == curAddr) {
+    // None of the registers have been modified yet, so we don't need to reload them
+    framelessUnwind(addressSpace, registers.getSP() + 8*regCount, registers);
+    return UNW_STEP_SUCCESS;
+  } else {
+    assert(curAddr < registers.getIP());
+  }
+
+
+  // And now for the epilogue
+  {
+    uint8_t i = 0;
+    uint64_t p = registers.getIP();
+    uint8_t b = 0;
+
+    while (true) {
+      b = addressSpace.get8(p++);
+      // This is a hack to detect VZEROUPPER but in between the popq's and ret
+      // It's not pretty but it works
+      if (b == 0xC5) {
+        if ((b = addressSpace.get8(p++)) == 0xF8 && (b = addressSpace.get8(p++)) == 0x77)
+          b = addressSpace.get8(p++);
+        else
+          break;
+      }
+      // popq %rbx           popq %rbp
+      if (b == 0x5B || b == 0x5D) {
+        i++;
+      } else if (b == 0x41) {
+        b = addressSpace.get8(p++);
+        if (b == 0x5C || b == 0x5D || b == 0x5E || b == 0x5F)
+          i++;
+        else
+          break;
+      } else if (b == 0xC3 || b == 0xCB || b == 0xC2 || b == 0xCA) {
+        // i pop's haven't happened yet
+        uint64_t savedRegisters = registers.getSP() + 8 * i;
+        if (regCount > 0) {
+          for (int8_t j = (int8_t)(regCount) - 1; j >= (int8_t)(regCount) - i; --j) {
+            uint64_t addr = savedRegisters - 8 * (regCount - (uint64_t)(j));
+            switch (registersSaved[j]) {
+            case UNWIND_X86_64_REG_RBX:
+              registers.setRBX(addressSpace.get64(addr));
+              break;
+            case UNWIND_X86_64_REG_R12:
+              registers.setR12(addressSpace.get64(addr));
+              break;
+            case UNWIND_X86_64_REG_R13:
+              registers.setR13(addressSpace.get64(addr));
+              break;
+            case UNWIND_X86_64_REG_R14:
+              registers.setR14(addressSpace.get64(addr));
+              break;
+            case UNWIND_X86_64_REG_R15:
+              registers.setR15(addressSpace.get64(addr));
+              break;
+            case UNWIND_X86_64_REG_RBP:
+              registers.setRBP(addressSpace.get64(addr));
+              break;
+            default:
+              _LIBUNWIND_DEBUG_LOG("bad register for frameless, encoding=%08X for "
+                                   "function starting at 0x%llX",
+                                   encoding, functionStart);
+              _LIBUNWIND_ABORT("invalid compact unwind encoding");
+            }
+          }
+        }
+        framelessUnwind(addressSpace, savedRegisters, registers);
+        return UNW_STEP_SUCCESS;
+      } else {
+        break;
+      }
+    }
+  }
+
+  /*
+    0x10fe2733a: 5b           popq  %rbx
+    0x10fe2733b: 41 5c        popq  %r12
+    0x10fe2733d: 41 5d        popq  %r13
+    0x10fe2733f: 41 5e        popq  %r14
+    0x10fe27341: 41 5f        popq  %r15
+    0x10fe27343: 5d           popq  %rbp
+  */
+
+
   uint64_t savedRegisters = registers.getSP() + stackSize - 8 - 8 * regCount;
   for (uint32_t i = 0; i < regCount; ++i) {
     switch (registersSaved[i]) {
diff --git a/lld/cmake/modules/AddLLD.cmake b/lld/cmake/modules/AddLLD.cmake
index d3924f7243d40..01b4fe65a45ac 100644
--- a/lld/cmake/modules/AddLLD.cmake
+++ b/lld/cmake/modules/AddLLD.cmake
@@ -20,7 +20,7 @@ macro(add_lld_library name)
       ${export_to_lldtargets}
       LIBRARY DESTINATION lib${LLVM_LIBDIR_SUFFIX}
       ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX}
-      RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}")
+      RUNTIME DESTINATION ${LLVM_TOOLS_INSTALL_DIR})
 
     if (${ARG_SHARED} AND NOT CMAKE_CONFIGURATION_TYPES)
       add_llvm_install_targets(install-${name}
@@ -47,7 +47,7 @@ macro(add_lld_tool name)
   get_target_export_arg(${name} LLD export_to_lldtargets)
   install(TARGETS ${name}
     ${export_to_lldtargets}
-    RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+    RUNTIME DESTINATION ${LLVM_TOOLS_INSTALL_DIR}
    COMPONENT ${name})
 
   if(NOT CMAKE_CONFIGURATION_TYPES)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7191e89d36071..f8f895c43dd58 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -26347,6 +26347,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
         !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(),
                                       LLD->getBasePtr().getValueType()))
       return false;
+    return false;
 
     // The loads must not depend on one another.
     if (LLD->isPredecessorOf(RLD) || RLD->isPredecessorOf(LLD))
diff --git a/llvm/lib/MC/MCObjectFileInfo.cpp b/llvm/lib/MC/MCObjectFileInfo.cpp
index 0b5109e41e717..df02d9e5863c5 100644
--- a/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -65,10 +65,11 @@ void MCObjectFileInfo::initMachOMCObjectFileInfo(const Triple &T) {
           MachO::S_ATTR_STRIP_STATIC_SYMS | MachO::S_ATTR_LIVE_SUPPORT,
       SectionKind::getReadOnly());
 
-  if (T.isOSDarwin() &&
-      (T.getArch() == Triple::aarch64 || T.getArch() == Triple::aarch64_32 ||
-       T.isSimulatorEnvironment()))
-    SupportsCompactUnwindWithoutEHFrame = true;
+  // Disabled for now, since we need to emit EH Frames for stack unwinding in the JIT
+  //if (T.isOSDarwin() &&
+  //    (T.getArch() == Triple::aarch64 || T.getArch() == Triple::aarch64_32 ||
+  //     T.isSimulatorEnvironment()))
+  //  SupportsCompactUnwindWithoutEHFrame = true;
 
   switch (Ctx->emitDwarfUnwindInfo()) {
   case EmitDwarfUnwindType::Always:
diff --git a/llvm/lib/MC/WinCOFFObjectWriter.cpp b/llvm/lib/MC/WinCOFFObjectWriter.cpp
index c203280d2c107..b7b8692d53f31 100644
--- a/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -1190,14 +1190,12 @@ void WinCOFFObjectWriter::reset() {
 bool WinCOFFObjectWriter::isSymbolRefDifferenceFullyResolvedImpl(
     const MCAssembler &Asm, const MCSymbol &SymA, const MCFragment &FB,
     bool InSet, bool IsPCRel) const {
-  // Don't drop relocations between functions, even if they are in the same text
-  // section. Multiple Visual C++ linker features depend on having the
-  // relocations present. The /INCREMENTAL flag will cause these relocations to
-  // point to thunks, and the /GUARD:CF flag assumes that it can use relocations
-  // to approximate the set of all address taken functions. LLD's implementation
-  // of /GUARD:CF also relies on the existance of these relocations.
+  // MS LINK expects to be able to replace all references to a function with a
+  // thunk to implement their /INCREMENTAL feature. Make sure we don't optimize
+  // away any relocations to functions.
   uint16_t Type = cast<MCSymbolCOFF>(SymA).getType();
-  if ((Type >> COFF::SCT_COMPLEX_TYPE_SHIFT) == COFF::IMAGE_SYM_DTYPE_FUNCTION)
+  if (Asm.isIncrementalLinkerCompatible() &&
+      (Type >> COFF::SCT_COMPLEX_TYPE_SHIFT) == COFF::IMAGE_SYM_DTYPE_FUNCTION)
     return false;
   return MCObjectWriter::isSymbolRefDifferenceFullyResolvedImpl(Asm, SymA, FB,
                                                                 InSet, IsPCRel);
diff --git a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index ce35eefb63fa2..70f6df7270b6e 100644
--- a/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -375,7 +375,9 @@ static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
   // with them.
   if (Addr) {
     Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
-    if (PtrTy->getPointerAddressSpace() != 0)
+    auto AS = PtrTy->getPointerAddressSpace();
+    // Allow for custom addresspaces
+    if (AS != 0 && AS < 10)
       return false;
   }
 
diff --git a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
index 25a44ecd9d241..738680e573462 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/alloca.ll
@@ -72,6 +72,20 @@ entry:
 ; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 20,
 ; CHECK: ret void
 
+define void @array32() sanitize_memory {
+entry:
+  %x = alloca i32, i32 5, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @array32(
+; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 20, i1 false)
+; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 20)
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 20,
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 20,
+; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 20,
+; CHECK: ret void
+
 define void @array_non_const(i64 %cnt) sanitize_memory {
 entry:
   %x = alloca i32, i64 %cnt, align 4
@@ -103,6 +117,22 @@ entry:
 ; KMSAN: call void @__msan_poison_alloca(ptr {{.*}}, i64 %[[A]],
 ; CHECK: ret void
 
+define void @array_non_const32(i32 %cnt) sanitize_memory {
+entry:
+  %x = alloca i32, i32 %cnt, align 4
+  ret void
+}
+
+; CHECK-LABEL: define void @array_non_const32(
+; CHECK: %[[Z:.*]] = zext i32 %cnt to i64
+; CHECK: %[[A:.*]] = mul i64 4, %[[Z]]
+; INLINE: call void @llvm.memset.p0i8.i64(i8* align 4 {{.*}}, i8 -1, i64 %[[A]], i1 false)
+; CALL: call void @__msan_poison_stack(i8* {{.*}}, i64 %[[A]])
+; ORIGIN: call void @__msan_set_alloca_origin_with_descr(i8* {{.*}}, i64 %[[A]],
+; ORIGIN-LEAN: call void @__msan_set_alloca_origin_no_descr(i8* {{.*}}, i64 %[[A]],
+; KMSAN: call void @__msan_poison_alloca(i8* {{.*}}, i64 %[[A]],
+; CHECK: ret void
+
 ; Check that the local is unpoisoned in the absence of sanitize_memory
 define void @unpoison_local() {
 entry:
diff --git a/llvm/test/MC/COFF/diff.s b/llvm/test/MC/COFF/diff.s
index 90466b59d0252..640bf8189e039 100644
--- a/llvm/test/MC/COFF/diff.s
+++ b/llvm/test/MC/COFF/diff.s
@@ -1,14 +1,19 @@
 // RUN: llvm-mc -filetype=obj -triple i686-pc-mingw32 %s | llvm-readobj -S --sr --sd - | FileCheck %s
 
-// COFF resolves differences between labels in the same section, unless that
-// label is declared with function type.
-
 .section baz, "xr"
+        .def     X
+        .scl    2;
+        .type   32;
+        .endef
         .globl X
 X:
         mov Y-X+42, %eax
         retl
 
+        .def     Y
+        .scl    2;
+        .type   32;
+        .endef
         .globl Y
 Y:
         retl
@@ -25,11 +30,6 @@ _foobar:                               # @foobar
 # %bb.0:
         ret
 
-        .globl  _baz
-_baz:
-        calll   _foobar
-        retl
-
         .data
         .globl  _rust_crate             # @rust_crate
         .align  4
@@ -39,15 +39,6 @@ _rust_crate:
         .long   _foobar-_rust_crate
         .long   _foobar-_rust_crate
 
-// Even though _baz and _foobar are in the same .text section, we keep the
-// relocation for compatibility with the VC linker's /guard:cf and /incremental
-// flags, even on mingw.
-
-// CHECK: Name: .text
-// CHECK: Relocations [
-// CHECK-NEXT: 0x12 IMAGE_REL_I386_REL32 _foobar
-// CHECK-NEXT: ]
-
 // CHECK: Name: .data
 // CHECK: Relocations [
 // CHECK-NEXT: 0x4 IMAGE_REL_I386_DIR32 _foobar
diff --git a/llvm/test/Transforms/MergeICmps/addressspaces.ll b/llvm/test/Transforms/MergeICmps/addressspaces.ll
new file mode 100644
index 0000000000000..9a74b4a5b2ca4
--- /dev/null
+++ b/llvm/test/Transforms/MergeICmps/addressspaces.ll
@@ -0,0 +1,67 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mergeicmps -S | FileCheck %s
+
+source_filename = "=="
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define void @juliaAS([2 x [5 x i64]] addrspace(11)* nocapture nonnull readonly align 8 dereferenceable(80) %0, [2 x [5 x i64]] addrspace(11)* nocapture nonnull readonly align 8 dereferenceable(80) %1) {
+; CHECK-LABEL: @juliaAS(
+; CHECK-NEXT:  top:
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* [[TMP0:%.*]], i64 0, i64 1, i64 2
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* [[TMP0]], i64 0, i64 1, i64 3
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* [[TMP0]], i64 0, i64 1, i64 4
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* [[TMP1:%.*]], i64 0, i64 1, i64 2
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* [[TMP1]], i64 0, i64 1, i64 3
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* [[TMP1]], i64 0, i64 1, i64 4
+; CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64 addrspace(11)* [[TMP2]], align 8
+; CHECK-NEXT:    [[TMP9:%.*]] = load i64, i64 addrspace(11)* [[TMP5]], align 8
+; CHECK-NEXT:    [[DOTNOT17:%.*]] = icmp eq i64 [[TMP8]], [[TMP9]]
+; CHECK-NEXT:    br i1 [[DOTNOT17]], label [[L70:%.*]], label [[L90:%.*]]
+; CHECK:       L70:
+; CHECK-NEXT:    [[TMP10:%.*]] = load i64, i64 addrspace(11)* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP11:%.*]] = load i64, i64 addrspace(11)* [[TMP6]], align 8
+; CHECK-NEXT:    [[DOTNOT18:%.*]] = icmp eq i64 [[TMP10]], [[TMP11]]
+; CHECK-NEXT:    br i1 [[DOTNOT18]], label [[L74:%.*]], label [[L90]]
+; CHECK:       L74:
+; CHECK-NEXT:    [[TMP12:%.*]] = load i64, i64 addrspace(11)* [[TMP4]], align 8
+; CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64 addrspace(11)* [[TMP7]], align 8
+; CHECK-NEXT:    [[DOTNOT19:%.*]] = icmp eq i64 [[TMP12]], [[TMP13]]
+; CHECK-NEXT:    br label [[L90]]
+; CHECK:       L90:
+; CHECK-NEXT:    [[VALUE_PHI2_OFF0:%.*]] = phi i1 [ false, [[TOP:%.*]] ], [ [[DOTNOT19]], [[L74]] ], [ false, [[L70]] ]
+; CHECK-NEXT:    ret void
+;
+top:
+  %2 = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* %0, i64 0, i64 1, i64 2
+  %3 = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* %0, i64 0, i64 1, i64 3
+  %4 = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* %0, i64 0, i64 1, i64 4
+  %5 = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* %1, i64 0, i64 1, i64 2
+  %6 = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* %1, i64 0, i64 1, i64 3
+  %7 = getelementptr inbounds [2 x [5 x i64]], [2 x [5 x i64]] addrspace(11)* %1, i64 0, i64 1, i64 4
+  %8 = load i64, i64 addrspace(11)* %2, align 8
+  %9 = load i64, i64 addrspace(11)* %5, align 8
+  %.not17 = icmp eq i64 %8, %9
+  br i1 %.not17, label %L70, label %L90
+
+L70:                                              ; preds = %top
+  %10 = load i64, i64 addrspace(11)* %3, align 8
+  %11 = load i64, i64 addrspace(11)* %6, align 8
+  %.not18 = icmp eq i64 %10, %11
+  br i1 %.not18, label %L74, label %L90
+
+L74:                                              ; preds = %L70
+  %12 = load i64, i64 addrspace(11)* %4, align 8
+  %13 = load i64, i64 addrspace(11)* %7, align 8
+  %.not19 = icmp eq i64 %12, %13
+  br label %L90
+
+L90:                                              ; preds = %L74, %L70, %top
+  %value_phi2.off0 = phi i1 [ false, %top ], [ %.not19, %L74 ], [ false, %L70 ]
+  ret void
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 1, !"Debug Info Version", i32 3}
+