//=-- lsan_common_fuchsia.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Fuchsia-specific code.
//
//===---------------------------------------------------------------------===//

#include "lsan_common.h"
#include "sanitizer_common/sanitizer_platform.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
#include <zircon/sanitizer.h>

#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_thread_registry.h"

// Ensure that the Zircon system ABI is linked in.
#pragma comment(lib, "zircon")

namespace __lsan {

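// Fuchsia needs no per-module setup before leak checking.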
void InitializePlatformSpecificModules() {}

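// There is no dynamic-linker module to special-case here; global regions are
// reported through the libc globals callback in LockStuffAndStopTheWorld.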
LoadedModule *GetLinker() { return nullptr; }

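// Per-thread counter tracking __lsan_disable()/__lsan_enable() nesting.
// The initial-exec TLS model keeps accesses from going through __tls_get_addr.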
__attribute__((tls_model("initial-exec"))) THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (disable_counter == 0) {
    DisableCounterUnderflow();
  }
  disable_counter--;
}

// There is nothing left to do after the globals callbacks.
void ProcessGlobalRegions(Frontier *frontier) {}

// Nothing to do here.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {}

// On Fuchsia, we can intercept _Exit gracefully, and return a failing exit
// code if required at that point. Calling Die() here is undefined
// behavior and causes rare race conditions.
void HandleLeaks() {}

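// Called from __sanitizer_process_exit_hook() below: if the process would
// otherwise exit successfully but leaks were reported, substitute the
// configured failure exit code.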
int ExitHook(int status) {
  return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status;
}

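// Locks the allocator and thread registry, then stops the world via
// __sanitizer_memory_snapshot and scans globals, stacks, registers, and TLS
// regions as enabled by flags.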
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument) {
  LockThreadRegistry();
  LockAllocator();

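  // Everything the region callbacks below need, passed to them through their
  // void *data argument.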
  struct Params {
    InternalMmapVector<uptr> allocator_caches;
    StopTheWorldCallback callback;
    CheckForLeaksParam *argument;
  } params = {{}, callback, argument};

  // Callback from libc for globals (data/bss modulo relro), when enabled.
  auto globals = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    ScanGlobalRange(begin, end, &params->argument->frontier);
  };

  // Callback from libc for thread stacks.
  auto stacks = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    ScanRangeForPointers(begin, end, &params->argument->frontier, "STACK",
                         kReachable);
  };

  // Callback from libc for thread registers.
  auto registers = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    ScanRangeForPointers(begin, end, &params->argument->frontier, "REGISTERS",
                         kReachable);
  };

  if (flags()->use_tls) {
    // Collect the allocator cache range from each thread so these
    // can all be excluded from the reported TLS ranges.
    GetAllThreadAllocatorCachesLocked(&params.allocator_caches);
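    // Sort the cache addresses so the TLS callback can binary-search them.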
    __sanitizer::Sort(params.allocator_caches.data(),
                      params.allocator_caches.size());
  }

  // Callback from libc for TLS regions. This includes thread_local
  // variables as well as C11 tss_set and POSIX pthread_setspecific.
  auto tls = +[](void *chunk, size_t size, void *data) {
    auto params = static_cast<const Params *>(data);
    uptr begin = reinterpret_cast<uptr>(chunk);
    uptr end = begin + size;
    auto i = __sanitizer::InternalLowerBound(params->allocator_caches, 0,
                                             params->allocator_caches.size(),
                                             begin, CompareLess<uptr>());
    if (i < params->allocator_caches.size() &&
        params->allocator_caches[i] >= begin &&
        end - params->allocator_caches[i] >= sizeof(AllocatorCache)) {
      // Split the range in two and omit the allocator cache within.
      ScanRangeForPointers(begin, params->allocator_caches[i],
                           &params->argument->frontier, "TLS", kReachable);
      uptr begin2 = params->allocator_caches[i] + sizeof(AllocatorCache);
      ScanRangeForPointers(begin2, end, &params->argument->frontier, "TLS",
                           kReachable);
    } else {
      ScanRangeForPointers(begin, end, &params->argument->frontier, "TLS",
                           kReachable);
    }
  };

  // This stops the world and then makes callbacks for various memory regions.
  // The final callback is the last thing before the world starts up again.
  __sanitizer_memory_snapshot(
      flags()->use_globals ? globals : nullptr,
      flags()->use_stacks ? stacks : nullptr,
      flags()->use_registers ? registers : nullptr,
      flags()->use_tls ? tls : nullptr,
      [](zx_status_t, void *data) {
        auto params = static_cast<const Params *>(data);

        // We don't use the thread registry at all for enumerating the threads
        // and their stacks, registers, and TLS regions. So use it separately
        // just for the allocator cache, and to call ForEachExtraStackRange,
        // which ASan needs.
        if (flags()->use_stacks) {
          GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
              [](ThreadContextBase *tctx, void *arg) {
                ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
                                       arg);
              },
              &params->argument->frontier);
        }

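        // Run the leak check itself while the world is still stopped; the
        // callbacks above have already scanned each thread's memory, so no
        // suspended-thread list needs to be passed along.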
        params->callback({}, params->argument);
      },
      &params);

  UnlockAllocator();
  UnlockThreadRegistry();
}

}  // namespace __lsan

// This is declared (in extern "C") by <zircon/sanitizer.h>.
// _Exit calls this directly to intercept and change the status value.
int __sanitizer_process_exit_hook(int status) {
  return __lsan::ExitHook(status);
}

#endif