mirror of git://gcc.gnu.org/git/gcc.git
libsanitizer: merge from upstream (0a1bcab9f3bf75c4c5d3e53bafb3eeb80320af46).

parent e2285af309
commit f732bf6a60
@@ -1,4 +1,4 @@
-82bc6a094e85014f1891ef9407496f44af8fe442
+0a1bcab9f3bf75c4c5d3e53bafb3eeb80320af46
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
@@ -112,7 +112,7 @@ void AsanDeactivate() {
   disabled.quarantine_size_mb = 0;
   disabled.thread_local_quarantine_size_kb = 0;
   // Redzone must be at least Max(16, granularity) bytes long.
-  disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
+  disabled.min_redzone = Max(16, (int)ASAN_SHADOW_GRANULARITY);
   disabled.max_redzone = disabled.min_redzone;
   disabled.alloc_dealloc_mismatch = false;
   disabled.may_return_null = true;
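Note: this merge renames ASan's shadow-mapping macros — SHADOW_SCALE, SHADOW_GRANULARITY and SHADOW_OFFSET become ASAN_SHADOW_SCALE, ASAN_SHADOW_GRANULARITY and ASAN_SHADOW_OFFSET — so most hunks below are mechanical substitutions. How the three relate, as a minimal C++ sketch (it mirrors the definitions the asan_mapping.h hunk introduces later in this patch; the helper name is ours, not the runtime's):

    #include <cstdint>

    // One shadow byte describes one 2^scale-byte granule of app memory.
    constexpr uint64_t kAsanShadowScale = 3;  // ASAN_SHADOW_SCALE
    constexpr uint64_t kAsanShadowGranularity =
        1ULL << kAsanShadowScale;  // ASAN_SHADOW_GRANULARITY == 8

    // The AsanDeactivate() hunk above keeps redzones at Max(16, granularity).
    constexpr int MinRedzone() {
      return kAsanShadowGranularity > 16 ? int(kAsanShadowGranularity) : 16;
    }
    static_assert(MinRedzone() == 16, "default scale 3 gives 8-byte granules");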
@@ -210,8 +210,7 @@ struct QuarantineCallback {
       CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
     }
 
-    PoisonShadow(m->Beg(),
-                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                  kAsanHeapLeftRedzoneMagic);
 
     // Statistics.
@@ -305,7 +304,6 @@ struct Allocator {
   QuarantineCache fallback_quarantine_cache;
 
   uptr max_user_defined_malloc_size;
-  atomic_uint8_t rss_limit_exceeded;
 
   // ------------------- Options --------------------------
   atomic_uint16_t min_redzone;
@@ -345,14 +343,6 @@ struct Allocator {
                : kMaxAllowedMallocSize;
   }
 
-  bool RssLimitExceeded() {
-    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
-  }
-
-  void SetRssLimitExceeded(bool limit_exceeded) {
-    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
-  }
-
   void RePoisonChunk(uptr chunk) {
     // This could be a user-facing chunk (with redzones), or some internal
    // housekeeping chunk, like TransferBatch. Start by assuming the former.
@@ -366,7 +356,7 @@ struct Allocator {
     if (chunk < beg && beg < end && end <= chunk_end) {
       // Looks like a valid AsanChunk in use, poison redzones only.
       PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
-      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+      uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
       FastPoisonShadowPartialRightRedzone(
           end_aligned_down, end - end_aligned_down,
           chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
@@ -484,14 +474,14 @@ struct Allocator {
                     AllocType alloc_type, bool can_fill) {
     if (UNLIKELY(!asan_inited))
       AsanInitFromRtl();
-    if (RssLimitExceeded()) {
+    if (UNLIKELY(IsRssLimitExceeded())) {
       if (AllocatorMayReturnNull())
         return nullptr;
       ReportRssLimitExceeded(stack);
     }
     Flags &fl = *flags();
     CHECK(stack);
-    const uptr min_alignment = SHADOW_GRANULARITY;
+    const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
     const uptr user_requested_alignment_log =
         ComputeUserRequestedAlignmentLog(alignment);
     if (alignment < min_alignment)
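Note: the struct Allocator hunks above drop the allocator's private rss_limit_exceeded flag and its accessors; Allocate() now asks UNLIKELY(IsRssLimitExceeded()) instead, with the flag's bookkeeping hosted in sanitizer_common in the upstream tree. The removed code followed this pattern — a relaxed atomic set by a monitor and polled on each allocation (a sketch using std::atomic rather than the runtime's own atomics):

    #include <atomic>

    // Sketch of the per-allocator flag this patch removes; relaxed ordering
    // suffices because the flag only gates a best-effort fast-path check.
    struct RssLimitFlag {
      std::atomic<bool> exceeded{false};
      bool Exceeded() const { return exceeded.load(std::memory_order_relaxed); }
      void Set(bool v) { exceeded.store(v, std::memory_order_relaxed); }
    };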
@@ -572,7 +562,7 @@ struct Allocator {
     m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
 
     uptr size_rounded_down_to_granularity =
-        RoundDownTo(size, SHADOW_GRANULARITY);
+        RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
     // Unpoison the bulk of the memory region.
     if (size_rounded_down_to_granularity)
       PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
@@ -580,7 +570,7 @@ struct Allocator {
     if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
       u8 *shadow =
           (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
-      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+      *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
     }
 
     AsanStats &thread_stats = GetCurrentThreadStats();
@@ -607,7 +597,7 @@ struct Allocator {
       CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
       reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
     }
-    ASAN_MALLOC_HOOK(res, size);
+    RunMallocHooks(res, size);
     return res;
   }
 
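Note: ASAN_MALLOC_HOOK(res, size) becomes a direct RunMallocHooks(res, size) call (ASAN_FREE_HOOK becomes RunFreeHooks below); the weak __sanitizer_malloc_hook plumbing the macros wrapped — including the no-op defaults removed further down — now lives inside the hook runners. User code still observes allocations through the public installer; a sketch, assuming the public <sanitizer/allocator_interface.h> header is available:

    #include <sanitizer/allocator_interface.h>
    #include <cstdio>

    // Hooks that RunMallocHooks/RunFreeHooks dispatch to.
    static void OnMalloc(const volatile void *ptr, size_t size) {
      fprintf(stderr, "malloc(%zu) -> %p\n", size, (const void *)ptr);
    }
    static void OnFree(const volatile void *ptr) {
      fprintf(stderr, "free(%p)\n", (const void *)ptr);
    }

    int main() {
      __sanitizer_install_malloc_and_free_hooks(OnMalloc, OnFree);
      delete[] new char[16];  // both hooks fire under ASan
    }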
@@ -650,8 +640,7 @@ struct Allocator {
     }
 
     // Poison the region.
-    PoisonShadow(m->Beg(),
-                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+    PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
                  kAsanHeapFreeMagic);
 
     AsanStats &thread_stats = GetCurrentThreadStats();
@@ -689,7 +678,7 @@ struct Allocator {
       return;
     }
 
-    ASAN_FREE_HOOK(ptr);
+    RunFreeHooks(ptr);
 
     // Must mark the chunk as quarantined before any changes to its metadata.
     // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
@@ -851,12 +840,12 @@ struct Allocator {
     quarantine.PrintStats();
   }
 
-  void ForceLock() ACQUIRE(fallback_mutex) {
+  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() RELEASE(fallback_mutex) {
+  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
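Note: ACQUIRE/RELEASE become SANITIZER_ACQUIRE/SANITIZER_RELEASE; upstream prefixed its Clang thread-safety-analysis macros so they cannot collide with identically named macros from other headers. Roughly what they expand to (a sketch; the real definitions live in sanitizer_common's thread-safety header and are no-ops without Clang):

    #if defined(__clang__)
    #  define SANITIZER_ACQUIRE(...) \
          __attribute__((acquire_capability(__VA_ARGS__)))
    #  define SANITIZER_RELEASE(...) \
          __attribute__((release_capability(__VA_ARGS__)))
    #else
    #  define SANITIZER_ACQUIRE(...)
    #  define SANITIZER_RELEASE(...)
    #endif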
@@ -1065,14 +1054,12 @@ uptr asan_mz_size(const void *ptr) {
   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
 }
 
-void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
-
-void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
-  instance.ForceUnlock();
+void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  instance.ForceLock();
 }
 
-void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
-  instance.SetRssLimitExceeded(limit_exceeded);
+void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  instance.ForceUnlock();
 }
 
 }  // namespace __asan
@@ -1230,16 +1217,3 @@ int __asan_update_allocation_context(void* addr) {
   GET_STACK_TRACE_MALLOC;
   return instance.UpdateAllocationStack((uptr)addr, &stack);
 }
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
-                             void *ptr, uptr size) {
-  (void)ptr;
-  (void)size;
-}
-
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
-  (void)ptr;
-}
-#endif
@@ -141,7 +141,7 @@ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
   if (shadow_scale)
-    *shadow_scale = SHADOW_SCALE;
+    *shadow_scale = ASAN_SHADOW_SCALE;
   if (shadow_offset)
-    *shadow_offset = SHADOW_OFFSET;
+    *shadow_offset = ASAN_SHADOW_OFFSET;
 }
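Note: __asan_get_shadow_mapping only reports the mapping parameters, so this rename is invisible to callers. A sketch of how a tool might use it to locate the shadow byte of an address — shadow = (addr >> scale) + offset — with uintptr_t standing in for the runtime's uptr:

    #include <cstdint>

    // Provided by the ASan runtime (see the hunk above).
    extern "C" void __asan_get_shadow_mapping(uintptr_t *shadow_scale,
                                              uintptr_t *shadow_offset);

    uint8_t *ShadowFor(const void *p) {
      uintptr_t scale, offset;
      __asan_get_shadow_mapping(&scale, &offset);
      return (uint8_t *)(((uintptr_t)p >> scale) + offset);
    }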
@@ -329,7 +329,7 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() {
       "      old_mid : %p\n"
       "      new_mid : %p\n",
       (void *)beg, (void *)end, (void *)old_mid, (void *)new_mid);
-  uptr granularity = SHADOW_GRANULARITY;
+  uptr granularity = ASAN_SHADOW_GRANULARITY;
   if (!IsAligned(beg, granularity))
     Report("ERROR: beg is not aligned by %zu\n", granularity);
   stack->Print();
@@ -410,7 +410,8 @@ ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
   if (AddrIsInMem(addr)) {
     u8 *shadow_addr = (u8 *)MemToShadow(addr);
     // If we are accessing 16 bytes, look at the second shadow byte.
-    if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
+    if (*shadow_addr == 0 && access_size > ASAN_SHADOW_GRANULARITY)
+      shadow_addr++;
     // If we are in the partial right redzone, look at the next shadow byte.
     if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
     bool far_from_bounds = false;
@@ -501,10 +502,11 @@ static void PrintLegend(InternalScopedString *str) {
   str->append(
       "Shadow byte legend (one shadow byte represents %d "
       "application bytes):\n",
-      (int)SHADOW_GRANULARITY);
+      (int)ASAN_SHADOW_GRANULARITY);
   PrintShadowByte(str, "  Addressable:           ", 0);
   str->append("  Partially addressable: ");
-  for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+  for (u8 i = 1; i < ASAN_SHADOW_GRANULARITY; i++)
+    PrintShadowByte(str, "", i, " ");
   str->append("\n");
   PrintShadowByte(str, "  Heap left redzone:       ",
                   kAsanHeapLeftRedzoneMagic);
@@ -53,9 +53,9 @@ struct ErrorDeadlySignal : ErrorBase {
       scariness.Scare(10, "null-deref");
     } else if (signal.addr == signal.pc) {
       scariness.Scare(60, "wild-jump");
-    } else if (signal.write_flag == SignalContext::WRITE) {
+    } else if (signal.write_flag == SignalContext::Write) {
       scariness.Scare(30, "wild-addr-write");
-    } else if (signal.write_flag == SignalContext::READ) {
+    } else if (signal.write_flag == SignalContext::Read) {
       scariness.Scare(20, "wild-addr-read");
     } else {
       scariness.Scare(25, "wild-addr");
@@ -372,7 +372,7 @@ struct ErrorGeneric : ErrorBase {
   u8 shadow_val;
 
   ErrorGeneric() = default;  // (*)
-  ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
+  ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, bool is_write_,
                uptr access_size_);
   void Print();
 };
@@ -28,8 +28,8 @@ static const u64 kAllocaRedzoneMask = 31UL;
 // For small size classes inline PoisonShadow for better performance.
 ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
   u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
-  if (SHADOW_SCALE == 3 && class_id <= 6) {
-    // This code expects SHADOW_SCALE=3.
+  if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
+    // This code expects ASAN_SHADOW_SCALE=3.
     for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
       shadow[i] = magic;
       // Make sure this does not become memset.
@@ -140,7 +140,6 @@ void FakeStack::HandleNoReturn() {
 // We do it based on their 'real_stack' values -- everything that is lower
 // than the current real_stack is garbage.
 NOINLINE void FakeStack::GC(uptr real_stack) {
-  uptr collected = 0;
   for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
     u8 *flags = GetFlags(stack_size_log(), class_id);
     for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
@@ -150,7 +149,6 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
           GetFrame(stack_size_log(), class_id, i));
       if (ff->real_stack < real_stack) {
         flags[i] = 0;
-        collected++;
       }
     }
   }
@@ -294,10 +292,10 @@ void __asan_alloca_poison(uptr addr, uptr size) {
   uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
   uptr PartialRzAddr = addr + size;
   uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
-  uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
+  uptr PartialRzAligned = PartialRzAddr & ~(ASAN_SHADOW_GRANULARITY - 1);
   FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
   FastPoisonShadowPartialRightRedzone(
-      PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
+      PartialRzAligned, PartialRzAddr % ASAN_SHADOW_GRANULARITY,
       RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
   FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
 }
@@ -305,7 +303,8 @@ void __asan_alloca_poison(uptr addr, uptr size) {
 SANITIZER_INTERFACE_ATTRIBUTE
 void __asan_allocas_unpoison(uptr top, uptr bottom) {
   if ((!top) || (top > bottom)) return;
-  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
-               (bottom - top) / SHADOW_GRANULARITY);
+  REAL(memset)
+  (reinterpret_cast<void *>(MemToShadow(top)), 0,
+   (bottom - top) / ASAN_SHADOW_GRANULARITY);
 }
 } // extern "C"
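Note: the alloca hunks keep the redzone arithmetic and only swap the granularity macro. Worked through once as a sketch (kAllocaRedzoneMask = 31UL is visible above; a 32-byte kAllocaRedzoneSize is assumed to match that mask, and 8 is the granularity at scale 3):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kAllocaRedzoneSize = 32;  // assumed, matches mask 31
      const uint64_t kAllocaRedzoneMask = 31;
      const uint64_t kGranularity = 8;  // ASAN_SHADOW_GRANULARITY

      uint64_t addr = 0x1000, size = 13;  // hypothetical alloca
      uint64_t left_rz = addr - kAllocaRedzoneSize;
      uint64_t partial_rz = addr + size;
      uint64_t right_rz = (partial_rz + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
      uint64_t partial_aligned = partial_rz & ~(kGranularity - 1);

      assert(left_rz == 0xfe0);           // left redzone start
      assert(partial_rz == 0x100d);       // first byte past user data
      assert(right_rz == 0x1020);         // 32-byte-aligned right redzone
      assert(partial_aligned == 0x1008);  // granule holding the partial byte
    }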
@@ -140,9 +140,9 @@ void InitializeFlags() {
            SanitizerToolName);
     Die();
   }
-  // Ensure that redzone is at least SHADOW_GRANULARITY.
-  if (f->redzone < (int)SHADOW_GRANULARITY)
-    f->redzone = SHADOW_GRANULARITY;
+  // Ensure that redzone is at least ASAN_SHADOW_GRANULARITY.
+  if (f->redzone < (int)ASAN_SHADOW_GRANULARITY)
+    f->redzone = ASAN_SHADOW_GRANULARITY;
   // Make "strict_init_order" imply "check_initialization_order".
   // TODO(samsonov): Use a single runtime flag for an init-order checker.
   if (f->strict_init_order) {
@@ -49,9 +49,10 @@ ASAN_FLAG(
     "to find more errors.")
 ASAN_FLAG(bool, replace_intrin, true,
           "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
-ASAN_FLAG(bool, detect_stack_use_after_return, false,
+ASAN_FLAG(bool, detect_stack_use_after_return,
+          SANITIZER_LINUX && !SANITIZER_ANDROID,
           "Enables stack-use-after-return checking at run-time.")
-ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
+ASAN_FLAG(int, min_uar_stack_size_log, 16,  // We can't do smaller anyway.
           "Minimum fake stack size log.")
 ASAN_FLAG(int, max_uar_stack_size_log,
           20, // 1Mb per size class, i.e. ~11Mb per thread
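Note: the default of detect_stack_use_after_return changes from false to SANITIZER_LINUX && !SANITIZER_ANDROID, i.e. the fake-stack machinery is now on by default on non-Android Linux. The bug class it diagnoses, as a minimal sketch:

    // With the new Linux default, ASan reports this without
    // ASAN_OPTIONS=detect_stack_use_after_return=1.
    __attribute__((noinline)) static int *Dangling() {
      int local = 42;
      return &local;  // dangles once Dangling() returns
    }

    int main() {
      return *Dangling();  // stack-use-after-return
    }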
@@ -14,16 +14,17 @@
 #include "sanitizer_common/sanitizer_fuchsia.h"
 #if SANITIZER_FUCHSIA
 
-#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_stack.h"
-#include "asan_thread.h"
-
-#include <limits.h>
-#include <zircon/sanitizer.h>
-#include <zircon/syscalls.h>
-#include <zircon/threads.h>
+#  include <limits.h>
+#  include <zircon/sanitizer.h>
+#  include <zircon/syscalls.h>
+#  include <zircon/threads.h>
+
+#  include "asan_interceptors.h"
+#  include "asan_internal.h"
+#  include "asan_stack.h"
+#  include "asan_thread.h"
+#  include "lsan/lsan_common.h"
 
 namespace __asan {
 
 // The system already set up the shadow memory for us.
@@ -118,14 +119,12 @@ struct AsanThread::InitOptions {
 
 // Shared setup between thread creation and startup for the initial thread.
 static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
-                                    uptr user_id, bool detached,
-                                    const char *name) {
+                                    bool detached, const char *name) {
   // In lieu of AsanThread::Create.
   AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__);
 
   AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
-  u32 tid =
-      asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
+  u32 tid = asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);
   asanThreadRegistry().SetThreadName(tid, name);
 
   return thread;
@@ -152,7 +151,7 @@ AsanThread *CreateMainThread() {
   CHECK_NE(__sanitizer::MainThreadStackBase, 0);
   CHECK_GT(__sanitizer::MainThreadStackSize, 0);
   AsanThread *t = CreateAsanThread(
-      nullptr, 0, reinterpret_cast<uptr>(self), true,
+      nullptr, 0, true,
       _zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name,
                               sizeof(name)) == ZX_OK
           ? name
@@ -182,8 +181,7 @@ static void *BeforeThreadCreateHook(uptr user_id, bool detached,
   GET_STACK_TRACE_THREAD;
   u32 parent_tid = GetCurrentTidOrInvalid();
 
-  AsanThread *thread =
-      CreateAsanThread(&stack, parent_tid, user_id, detached, name);
+  AsanThread *thread = CreateAsanThread(&stack, parent_tid, detached, name);
 
   // On other systems, AsanThread::Init() is called from the new
   // thread itself. But on Fuchsia we already know the stack address
@@ -238,8 +236,18 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
   __sanitizer_fill_shadow(p, size, 0, 0);
 }
 
+// On Fuchsia, leak detection is done by a special hook after atexit hooks.
+// So this doesn't install any atexit hook like on other platforms.
+void InstallAtExitCheckLeaks() {}
+
 }  // namespace __asan
 
+namespace __lsan {
+
+bool UseExitcodeOnLeak() { return __asan::flags()->halt_on_error; }
+
+}  // namespace __lsan
+
 // These are declared (in extern "C") by <zircon/sanitizer.h>.
 // The system runtime will call our definitions directly.
 
@@ -61,14 +61,13 @@ ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
 }
 
 ALWAYS_INLINE void PoisonRedZones(const Global &g) {
-  uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
+  uptr aligned_size = RoundUpTo(g.size, ASAN_SHADOW_GRANULARITY);
   FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
                    kAsanGlobalRedzoneMagic);
   if (g.size != aligned_size) {
     FastPoisonShadowPartialRightRedzone(
-        g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
-        g.size % SHADOW_GRANULARITY,
-        SHADOW_GRANULARITY,
+        g.beg + RoundDownTo(g.size, ASAN_SHADOW_GRANULARITY),
+        g.size % ASAN_SHADOW_GRANULARITY, ASAN_SHADOW_GRANULARITY,
         kAsanGlobalRedzoneMagic);
   }
 }
@@ -154,6 +153,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }
 
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -199,6 +215,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
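Note: RegisterGlobal now falls back to CheckODRViolationViaPoisoning when a global carries no ODR indicator, using the redzone poisoning itself to notice a double registration. The condition it catches, sketched as two translation units (deliberately not a single runnable file — the bug is the duplicate definition):

    // a.cpp — linked into the main executable
    int duplicated_global[100];

    // b.cpp — linked into a shared library loaded by the same process:
    // the same symbol gets registered with ASan twice, and the second
    // registration finds its range already poisoned.
    int duplicated_global[100];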
@@ -130,23 +130,24 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
 // Strict init-order checking is dlopen-hostile:
 // https://github.com/google/sanitizers/issues/178
-#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
-  do {                                               \
-    if (flags()->strict_init_order)                  \
-      StopInitOrderChecking();                       \
-    CheckNoDeepBind(filename, flag);                 \
-  } while (false)
-#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
-#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
-#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
-  if (AsanThread *t = GetCurrentThread()) {          \
-    *begin = t->tls_begin();                         \
-    *end = t->tls_end();                             \
-  } else {                                           \
-    *begin = *end = 0;                               \
-  }
+#  define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+    ({                                              \
+      if (flags()->strict_init_order)               \
+        StopInitOrderChecking();                    \
+      CheckNoDeepBind(filename, flag);              \
+      REAL(dlopen)(filename, flag);                 \
+    })
+#  define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#  define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
+#  define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
+#  define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
+#  define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+    if (AsanThread *t = GetCurrentThread()) {          \
+      *begin = t->tls_begin();                         \
+      *end = t->tls_end();                             \
+    } else {                                           \
+      *begin = *end = 0;                               \
+    }
 
 #define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
   do {                                                       \
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-    || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #  define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 # else
@@ -7,6 +7,7 @@
 //===----------------------------------------------------------------------===//
 // Asan interface list.
 //===----------------------------------------------------------------------===//
+
 INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
 INTERFACE_FUNCTION(__asan_address_is_poisoned)
 INTERFACE_FUNCTION(__asan_after_dynamic_init)
@@ -17,19 +17,19 @@
 #include "asan_interface_internal.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
 
 #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
-# error "The AddressSanitizer run-time should not be"
-        " instrumented by AddressSanitizer"
+#  error \
+      "The AddressSanitizer run-time should not be instrumented by AddressSanitizer"
 #endif
 
 // Build-time configuration options.
 
 // If set, asan will intercept C++ exception api call(s).
 #ifndef ASAN_HAS_EXCEPTIONS
-# define ASAN_HAS_EXCEPTIONS 1
+#  define ASAN_HAS_EXCEPTIONS 1
 #endif
 
 // If set, values like allocator chunk size, as well as defaults for some flags
@@ -43,11 +43,11 @@
 #endif
 
 #ifndef ASAN_DYNAMIC
-# ifdef PIC
-#  define ASAN_DYNAMIC 1
-# else
-#  define ASAN_DYNAMIC 0
-# endif
+#  ifdef PIC
+#    define ASAN_DYNAMIC 1
+#  else
+#    define ASAN_DYNAMIC 0
+#  endif
 #endif
 
 // All internal functions in asan reside inside the __asan namespace
@@ -123,26 +123,18 @@ void *AsanDlSymNext(const char *sym);
 // `dlopen()` specific initialization inside this function.
 bool HandleDlopenInit();
 
-// Add convenient macro for interface functions that may be represented as
-// weak hooks.
-#define ASAN_MALLOC_HOOK(ptr, size)                                   \
-  do {                                                                \
-    if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \
-    RunMallocHooks(ptr, size);                                        \
-  } while (false)
-#define ASAN_FREE_HOOK(ptr)                                 \
-  do {                                                      \
-    if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \
-    RunFreeHooks(ptr);                                      \
-  } while (false)
+void InstallAtExitCheckLeaks();
+
 #define ASAN_ON_ERROR() \
-  if (&__asan_on_error) __asan_on_error()
+  if (&__asan_on_error) \
+  __asan_on_error()
 
 extern int asan_inited;
 // Used to avoid infinite recursion in __asan_init().
 extern bool asan_init_is_running;
 extern void (*death_callback)(void);
-// These magic values are written to shadow for better error reporting.
+// These magic values are written to shadow for better error
+// reporting.
 const int kAsanHeapLeftRedzoneMagic = 0xfa;
 const int kAsanHeapFreeMagic = 0xfd;
 const int kAsanStackLeftRedzoneMagic = 0xf1;
@@ -107,7 +107,7 @@ uptr FindDynamicShadowStart() {
   return FindPremappedShadowStart(shadow_size_bytes);
 #endif
 
-  return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+  return MapDynamicShadow(shadow_size_bytes, ASAN_SHADOW_SCALE,
                           /*min_shadow_base_alignment*/ 0, kHighMemEnd);
 }
 
@@ -131,30 +131,24 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
   VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", info->dlpi_name,
           (void *)info->dlpi_addr);
 
-  // Continue until the first dynamic library is found
-  if (!info->dlpi_name || info->dlpi_name[0] == 0)
-    return 0;
-
-  // Ignore vDSO
-  if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
-    return 0;
+  const char **name = (const char **)data;
 
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
   // Ignore first entry (the main program)
-  char **p = (char **)data;
-  if (!(*p)) {
-    *p = (char *)-1;
+  if (!*name) {
+    *name = "";
     return 0;
   }
-#endif
 
-#if SANITIZER_SOLARIS
-  // Ignore executable on Solaris
-  if (info->dlpi_addr == 0)
-    return 0;
-#endif
+#  if SANITIZER_LINUX
+  // Ignore vDSO. glibc versions earlier than 2.15 (and some patched
+  // by distributors) return an empty name for the vDSO entry, so
+  // detect this as well.
+  if (!info->dlpi_name[0] ||
+      internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
+    return 0;
+#  endif
 
-  *(const char **)data = info->dlpi_name;
+  *name = info->dlpi_name;
   return 1;
 }
 
@@ -175,7 +169,7 @@ void AsanCheckDynamicRTPrereqs() {
   // Ensure that dynamic RT is the first DSO in the list
   const char *first_dso_name = nullptr;
   dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
-  if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
+  if (first_dso_name && first_dso_name[0] && !IsDynamicRTName(first_dso_name)) {
     Report("ASan runtime does not come first in initial library list; "
            "you should either link runtime to your application or "
            "manually preload it with LD_PRELOAD.\n");
@@ -55,7 +55,7 @@ void *AsanDoesNotSupportStaticLinkage() {
 }
 
 uptr FindDynamicShadowStart() {
-  return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+  return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
                           /*min_shadow_base_alignment*/ 0, kHighMemEnd);
 }
 
@@ -13,7 +13,7 @@
 #ifndef ASAN_MAPPING_H
 #define ASAN_MAPPING_H
 
-#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_platform.h"
 
 // The full explanation of the memory mapping could be found here:
 // https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
@@ -151,149 +151,145 @@
 // || `[0x30000000, 0x35ffffff]` || LowShadow  ||
 // || `[0x00000000, 0x2fffffff]` || LowMem     ||
 
-#if defined(ASAN_SHADOW_SCALE)
-static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
-#else
-static const u64 kDefaultShadowScale = 3;
-#endif
-static const u64 kDefaultShadowSentinel = ~(uptr)0;
-static const u64 kDefaultShadowOffset32 = 1ULL << 29;  // 0x20000000
-static const u64 kDefaultShadowOffset64 = 1ULL << 44;
-static const u64 kDefaultShort64bitShadowOffset =
-    0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale);  // < 2G.
-static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
-static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
-static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
-static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
-static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43;  // 0x80000000000
-static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
-static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
-static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
-static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
-static const u64 kWindowsShadowOffset32 = 3ULL << 28;  // 0x30000000
-
-#define SHADOW_SCALE kDefaultShadowScale
+#define ASAN_SHADOW_SCALE 3
 
 #if SANITIZER_FUCHSIA
-#  define SHADOW_OFFSET (0)
+#  define ASAN_SHADOW_OFFSET_CONST (0)
 #elif SANITIZER_WORDSIZE == 32
 #  if SANITIZER_ANDROID
-#    define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+#    define ASAN_SHADOW_OFFSET_DYNAMIC
 #  elif defined(__mips__)
-#    define SHADOW_OFFSET kMIPS32_ShadowOffset32
+#    define ASAN_SHADOW_OFFSET_CONST 0x0aaa0000
 #  elif SANITIZER_FREEBSD
-#    define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+#    define ASAN_SHADOW_OFFSET_CONST 0x40000000
 #  elif SANITIZER_NETBSD
-#    define SHADOW_OFFSET kNetBSD_ShadowOffset32
+#    define ASAN_SHADOW_OFFSET_CONST 0x40000000
 #  elif SANITIZER_WINDOWS
-#    define SHADOW_OFFSET kWindowsShadowOffset32
+#    define ASAN_SHADOW_OFFSET_CONST 0x30000000
 #  elif SANITIZER_IOS
-#    define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+#    define ASAN_SHADOW_OFFSET_DYNAMIC
 #  else
-#    define SHADOW_OFFSET kDefaultShadowOffset32
+#    define ASAN_SHADOW_OFFSET_CONST 0x20000000
 #  endif
 #else
 #  if SANITIZER_IOS
-#    define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+#    define ASAN_SHADOW_OFFSET_DYNAMIC
 #  elif SANITIZER_MAC && defined(__aarch64__)
-#    define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
-#elif SANITIZER_RISCV64
-#define SHADOW_OFFSET kRiscv64_ShadowOffset64
+#    define ASAN_SHADOW_OFFSET_DYNAMIC
+#  elif SANITIZER_RISCV64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000000d55550000
 #  elif defined(__aarch64__)
-#    define SHADOW_OFFSET kAArch64_ShadowOffset64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
 #  elif defined(__powerpc64__)
-#    define SHADOW_OFFSET kPPC64_ShadowOffset64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
 #  elif defined(__s390x__)
-#    define SHADOW_OFFSET kSystemZ_ShadowOffset64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
 #  elif SANITIZER_FREEBSD
-#    define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000
 #  elif SANITIZER_NETBSD
-#    define SHADOW_OFFSET kNetBSD_ShadowOffset64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000
 #  elif SANITIZER_MAC
-#    define SHADOW_OFFSET kDefaultShadowOffset64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
 #  elif defined(__mips64)
-#    define SHADOW_OFFSET kMIPS64_ShadowOffset64
-#elif defined(__sparc__)
-#define SHADOW_OFFSET kSPARC64_ShadowOffset64
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000002000000000
+#  elif defined(__sparc__)
+#    define ASAN_SHADOW_OFFSET_CONST 0x0000080000000000
 #  elif SANITIZER_WINDOWS64
-#    define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+#    define ASAN_SHADOW_OFFSET_DYNAMIC
 #  else
-#    define SHADOW_OFFSET kDefaultShort64bitShadowOffset
+#    if ASAN_SHADOW_SCALE != 3
+#      error "Value below is based on shadow scale = 3."
+#      error "Original formula was: 0x7FFFFFFF & (~0xFFFULL << SHADOW_SCALE)."
+#    endif
+#    define ASAN_SHADOW_OFFSET_CONST 0x000000007fff8000
 #  endif
 #endif
 
-#if SANITIZER_ANDROID && defined(__arm__)
-#  define ASAN_PREMAP_SHADOW 1
-#else
-#  define ASAN_PREMAP_SHADOW 0
-#endif
+#if defined(__cplusplus)
+#  include "asan_internal.h"
 
-#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
+static const u64 kDefaultShadowSentinel = ~(uptr)0;
 
-#define DO_ASAN_MAPPING_PROFILE 0  // Set to 1 to profile the functions below.
+#  if defined(ASAN_SHADOW_OFFSET_CONST)
+static const u64 kConstShadowOffset = ASAN_SHADOW_OFFSET_CONST;
+#    define ASAN_SHADOW_OFFSET kConstShadowOffset
+#  elif defined(ASAN_SHADOW_OFFSET_DYNAMIC)
+#    define ASAN_SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+#  else
+#    error "ASAN_SHADOW_OFFSET can't be determined."
+#  endif
 
-#if DO_ASAN_MAPPING_PROFILE
-#  define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
-#else
-#  define PROFILE_ASAN_MAPPING()
-#endif
+#  if SANITIZER_ANDROID && defined(__arm__)
+#    define ASAN_PREMAP_SHADOW 1
+#  else
+#    define ASAN_PREMAP_SHADOW 0
+#  endif
+
+#  define ASAN_SHADOW_GRANULARITY (1ULL << ASAN_SHADOW_SCALE)
+
+#  define DO_ASAN_MAPPING_PROFILE 0  // Set to 1 to profile the functions below.
+
+#  if DO_ASAN_MAPPING_PROFILE
+#    define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
+#  else
+#    define PROFILE_ASAN_MAPPING()
+#  endif
 
 // If 1, all shadow boundaries are constants.
 // Don't set to 1 other than for testing.
-#define ASAN_FIXED_MAPPING 0
+#  define ASAN_FIXED_MAPPING 0
 
 namespace __asan {
 
 extern uptr AsanMappingProfile[];
 
-#if ASAN_FIXED_MAPPING
+#  if ASAN_FIXED_MAPPING
 // Fixed mapping for 64-bit Linux. Mostly used for performance comparison
 // with non-fixed mapping. As of r175253 (Feb 2013) the performance
 // difference between fixed and non-fixed mapping is below the noise level.
 static uptr kHighMemEnd = 0x7fffffffffffULL;
-static uptr kMidMemBeg =    0x3000000000ULL;
-static uptr kMidMemEnd =    0x4fffffffffULL;
-#else
+static uptr kMidMemBeg = 0x3000000000ULL;
+static uptr kMidMemEnd = 0x4fffffffffULL;
+#  else
 extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;  // Initialized in __asan_init.
-#endif
+#  endif
 
 }  // namespace __asan
 
-#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
-#  include "asan_mapping_sparc64.h"
-#else
-#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
+#  if defined(__sparc__) && SANITIZER_WORDSIZE == 64
+#    include "asan_mapping_sparc64.h"
+#  else
+#    define MEM_TO_SHADOW(mem) \
+      (((mem) >> ASAN_SHADOW_SCALE) + (ASAN_SHADOW_OFFSET))
 
-#define kLowMemBeg 0
-#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
+#    define kLowMemBeg 0
+#    define kLowMemEnd (ASAN_SHADOW_OFFSET ? ASAN_SHADOW_OFFSET - 1 : 0)
 
-#define kLowShadowBeg SHADOW_OFFSET
-#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
+#    define kLowShadowBeg ASAN_SHADOW_OFFSET
+#    define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
 
-#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1)
+#    define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1)
 
-#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
-#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
+#    define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+#    define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
 
-# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg)
-# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd)
+#    define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg)
+#    define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd)
 
 // With the zero shadow base we can not actually map pages starting from 0.
 // This constant is somewhat arbitrary.
-#define kZeroBaseShadowStart 0
-#define kZeroBaseMaxShadowStart (1 << 18)
+#    define kZeroBaseShadowStart 0
+#    define kZeroBaseMaxShadowStart (1 << 18)
 
-#define kShadowGapBeg   (kLowShadowEnd ? kLowShadowEnd + 1 \
-                                       : kZeroBaseShadowStart)
-#define kShadowGapEnd   ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
+#    define kShadowGapBeg \
+      (kLowShadowEnd ? kLowShadowEnd + 1 : kZeroBaseShadowStart)
+#    define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
 
-#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0)
-#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0)
+#    define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0)
+#    define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0)
 
-#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
-#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
+#    define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
+#    define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
 
 namespace __asan {
 
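Note: the restructured asan_mapping.h spells offsets out as plain constants; for the default 64-bit layout, ASAN_SHADOW_OFFSET_CONST 0x000000007fff8000 replaces the old kDefaultShort64bitShadowOffset formula, as the retained #error comments record. The translation itself is unchanged — a worked sketch:

    #include <cassert>
    #include <cstdint>

    // MEM_TO_SHADOW for the default 64-bit mapping in the hunk above:
    // shadow = (mem >> 3) + 0x7fff8000.
    constexpr uint64_t kScale = 3;
    constexpr uint64_t kOffset = 0x000000007fff8000ULL;

    constexpr uint64_t MemToShadow(uint64_t mem) {
      return (mem >> kScale) + kOffset;
    }

    // The old formula yields the same constant:
    static_assert((0x7FFFFFFF & (~0xFFFULL << kScale)) == kOffset, "");

    int main() {
      assert(MemToShadow(0) == kOffset);  // LowMem shadow starts at the offset
      assert(MemToShadow(0x602000000000ULL) == 0xc047fff8000ULL);  // heap-ish
    }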
@@ -331,29 +327,31 @@ static inline bool AddrIsInShadowGap(uptr a) {
   PROFILE_ASAN_MAPPING();
   if (kMidMemBeg) {
     if (a <= kShadowGapEnd)
-      return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+      return ASAN_SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
     return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
            (a >= kShadowGap3Beg && a <= kShadowGap3End);
   }
   // In zero-based shadow mode we treat addresses near zero as addresses
   // in shadow gap as well.
-  if (SHADOW_OFFSET == 0)
+  if (ASAN_SHADOW_OFFSET == 0)
     return a <= kShadowGapEnd;
   return a >= kShadowGapBeg && a <= kShadowGapEnd;
 }
 
 }  // namespace __asan
 
-#endif
+#  endif
 
 namespace __asan {
 
-static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
+static inline uptr MemToShadowSize(uptr size) {
+  return size >> ASAN_SHADOW_SCALE;
+}
 
 static inline bool AddrIsInMem(uptr a) {
   PROFILE_ASAN_MAPPING();
   return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
-      (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
+         (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
 }
 
 static inline uptr MemToShadow(uptr p) {
|
||||
|
||||
static inline bool AddrIsAlignedByGranularity(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return (a & (SHADOW_GRANULARITY - 1)) == 0;
|
||||
return (a & (ASAN_SHADOW_GRANULARITY - 1)) == 0;
|
||||
}
|
||||
|
||||
static inline bool AddressIsPoisoned(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
const uptr kAccessSize = 1;
|
||||
u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
|
||||
u8 *shadow_address = (u8 *)MEM_TO_SHADOW(a);
|
||||
s8 shadow_value = *shadow_address;
|
||||
if (shadow_value) {
|
||||
u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1))
|
||||
+ kAccessSize - 1;
|
||||
u8 last_accessed_byte =
|
||||
(a & (ASAN_SHADOW_GRANULARITY - 1)) + kAccessSize - 1;
|
||||
return (last_accessed_byte >= shadow_value);
|
||||
}
|
||||
return false;
|
||||
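Note: AddressIsPoisoned is only reformatted: shadow byte 0 still means the whole 8-byte granule is addressable, and a value k in 1..7 means exactly the first k bytes are. The partial-granule test, sketched:

    #include <cassert>
    #include <cstdint>

    // From the hunk above: a 1-byte access at 'a' is poisoned when the
    // accessed byte's offset inside its granule reaches the shadow value.
    bool IsPoisoned1(uint64_t a, int8_t shadow_value) {
      const uint64_t kGranularity = 8;  // ASAN_SHADOW_GRANULARITY
      if (!shadow_value) return false;
      uint8_t last_byte = a & (kGranularity - 1);  // + kAccessSize - 1
      return last_byte >= shadow_value;
    }

    int main() {
      assert(!IsPoisoned1(0x1004, 5));  // offset 4 < 5 addressable bytes
      assert(IsPoisoned1(0x1005, 5));   // offset 5 >= 5: poisoned
    }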
@@ -390,4 +388,6 @@ static const uptr kAsanMappingProfileSize = __LINE__;
 
 }  // namespace __asan
 
+#endif  // __cplusplus
+
 #endif  // ASAN_MAPPING_H
@@ -25,13 +25,14 @@
 // The idea is to chop the high bits before doing the scaling, so the two
 // parts become contiguous again and the usual scheme can be applied.
 
-#define MEM_TO_SHADOW(mem) \
-  ((((mem) << HIGH_BITS) >> (HIGH_BITS + (SHADOW_SCALE))) + (SHADOW_OFFSET))
+#define MEM_TO_SHADOW(mem)                                       \
+  ((((mem) << HIGH_BITS) >> (HIGH_BITS + (ASAN_SHADOW_SCALE))) + \
+   (ASAN_SHADOW_OFFSET))
 
 #define kLowMemBeg 0
-#define kLowMemEnd (SHADOW_OFFSET - 1)
+#define kLowMemEnd (ASAN_SHADOW_OFFSET - 1)
 
-#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowBeg ASAN_SHADOW_OFFSET
 #define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
 
 // But of course there is the huge hole between the high shadow memory,
@@ -12,11 +12,13 @@
 //===----------------------------------------------------------------------===//
 
 #include "asan_poisoning.h"
+
 #include "asan_report.h"
 #include "asan_stack.h"
 #include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
 
 namespace __asan {
 
@@ -35,7 +37,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) {
   CHECK(AddrIsAlignedByGranularity(addr));
   CHECK(AddrIsInMem(addr));
   CHECK(AddrIsAlignedByGranularity(addr + size));
-  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
+  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
   CHECK(REAL(memset));
   FastPoisonShadow(addr, size, value);
 }
@@ -52,12 +54,12 @@ void PoisonShadowPartialRightRedzone(uptr addr,
 
 struct ShadowSegmentEndpoint {
   u8 *chunk;
-  s8 offset;  // in [0, SHADOW_GRANULARITY)
+  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
   s8 value;   // = *chunk;
 
   explicit ShadowSegmentEndpoint(uptr address) {
     chunk = (u8*)MemToShadow(address);
-    offset = address & (SHADOW_GRANULARITY - 1);
+    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
     value = *chunk;
   }
 };
@@ -72,14 +74,14 @@ void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
   }
   CHECK(size);
   CHECK_LE(size, 4096);
-  CHECK(IsAligned(end, SHADOW_GRANULARITY));
-  if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
+  CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
+  if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
     *(u8 *)MemToShadow(ptr) =
-        poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
-    ptr |= SHADOW_GRANULARITY - 1;
+        poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
+    ptr |= ASAN_SHADOW_GRANULARITY - 1;
     ptr++;
   }
-  for (; ptr < end; ptr += SHADOW_GRANULARITY)
+  for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
     *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
 }
 
@@ -181,12 +183,12 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
   if (!AddrIsInMem(end))
     return end;
   CHECK_LT(beg, end);
-  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
-  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
+  uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
+  uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
   uptr shadow_beg = MemToShadow(aligned_b);
   uptr shadow_end = MemToShadow(aligned_e);
   // First check the first and the last application bytes,
-  // then check the SHADOW_GRANULARITY-aligned region by calling
+  // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
   // mem_is_zero on the corresponding shadow.
   if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
       (shadow_end <= shadow_beg ||
@@ -285,7 +287,7 @@ uptr __asan_load_cxx_array_cookie(uptr *p) {
 // assumes that left border of region to be poisoned is properly aligned.
 static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
   if (size == 0) return;
-  uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
+  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
   PoisonShadow(addr, aligned_size,
                do_poison ? kAsanStackUseAfterScopeMagic : 0);
   if (size == aligned_size)
@@ -351,7 +353,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
   uptr end = reinterpret_cast<uptr>(end_p);
   uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
   uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
-  uptr granularity = SHADOW_GRANULARITY;
+  uptr granularity = ASAN_SHADOW_GRANULARITY;
   if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
         IsAligned(beg, granularity))) {
     GET_STACK_TRACE_FATAL_HERE;
@@ -44,8 +44,8 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
                    common_flags()->clear_shadow_mmap_threshold);
 #else
   uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
-  uptr shadow_end = MEM_TO_SHADOW(
-      aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
+  uptr shadow_end =
+      MEM_TO_SHADOW(aligned_beg + aligned_size - ASAN_SHADOW_GRANULARITY) + 1;
   // FIXME: Page states are different on Windows, so using the same interface
   // for mapping shadow and zeroing out pages doesn't "just work", so we should
   // probably provide higher-level interface for these operations.
@@ -78,11 +78,12 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
     DCHECK(CanPoisonMemory());
   bool poison_partial = flags()->poison_partial;
   u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
-  for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
-    if (i + SHADOW_GRANULARITY <= size) {
+  for (uptr i = 0; i < redzone_size; i += ASAN_SHADOW_GRANULARITY, shadow++) {
+    if (i + ASAN_SHADOW_GRANULARITY <= size) {
       *shadow = 0;  // fully addressable
     } else if (i >= size) {
-      *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value;  // unaddressable
+      *shadow =
+          (ASAN_SHADOW_GRANULARITY == 128) ? 0xff : value;  // unaddressable
     } else {
       // first size-i bytes are addressable
       *shadow = poison_partial ? static_cast<u8>(size - i) : 0;
@@ -14,22 +14,23 @@
 #include "sanitizer_common/sanitizer_platform.h"
 #if SANITIZER_POSIX
 
-#include "asan_internal.h"
-#include "asan_interceptors.h"
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_posix.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
+#  include <pthread.h>
+#  include <signal.h>
+#  include <stdlib.h>
+#  include <sys/resource.h>
+#  include <sys/time.h>
+#  include <unistd.h>
 
-#include <pthread.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <unistd.h>
+#  include "asan_interceptors.h"
+#  include "asan_internal.h"
+#  include "asan_mapping.h"
+#  include "asan_poisoning.h"
+#  include "asan_report.h"
+#  include "asan_stack.h"
+#  include "lsan/lsan_common.h"
+#  include "sanitizer_common/sanitizer_libc.h"
+#  include "sanitizer_common/sanitizer_posix.h"
+#  include "sanitizer_common/sanitizer_procmaps.h"
 
 namespace __asan {
 
@@ -131,7 +132,7 @@ void AsanTSDSet(void *tsd) {
 }
 
 void PlatformTSDDtor(void *tsd) {
-  AsanThreadContext *context = (AsanThreadContext*)tsd;
+  AsanThreadContext *context = (AsanThreadContext *)tsd;
   if (context->destructor_iterations > 1) {
     context->destructor_iterations--;
     CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
@@ -140,6 +141,18 @@ void PlatformTSDDtor(void *tsd) {
   AsanThread::TSDDtor(tsd);
 }
 #endif
+
+void InstallAtExitCheckLeaks() {
+  if (CAN_SANITIZE_LEAKS) {
+    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+      if (flags()->halt_on_error)
+        Atexit(__lsan::DoLeakCheck);
+      else
+        Atexit(__lsan::DoRecoverableLeakCheckVoid);
+    }
+  }
+}
 
 }  // namespace __asan
 
 #endif  // SANITIZER_POSIX
@@ -26,7 +26,7 @@ namespace __asan {
 // Conservative upper limit.
 uptr PremapShadowSize() {
   uptr granularity = GetMmapGranularity();
-  return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity);
+  return RoundUpTo(GetMaxVirtualAddress() >> ASAN_SHADOW_SCALE, granularity);
 }
 
 // Returns an address aligned to 8 pages, such that one page on the left and
@@ -11,17 +11,19 @@
 // This file contains error reporting code.
 //===----------------------------------------------------------------------===//
 
+#include "asan_report.h"
+
+#include "asan_descriptions.h"
 #include "asan_errors.h"
 #include "asan_flags.h"
-#include "asan_descriptions.h"
 #include "asan_internal.h"
 #include "asan_mapping.h"
-#include "asan_report.h"
 #include "asan_scariness_score.h"
 #include "asan_stack.h"
 #include "asan_thread.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
 #include "sanitizer_common/sanitizer_report_decorator.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
@@ -460,6 +462,10 @@ static bool SuppressErrorReport(uptr pc) {
 
 void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                         uptr access_size, u32 exp, bool fatal) {
+  if (__asan_test_only_reported_buggy_pointer) {
+    *__asan_test_only_reported_buggy_pointer = addr;
+    return;
+  }
   if (!fatal && SuppressErrorReport(pc)) return;
   ENABLE_FRAME_POINTER;
 
@@ -27,6 +27,7 @@
 #include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 #include "ubsan/ubsan_init.h"
@@ -44,7 +45,9 @@ static void AsanDie() {
   static atomic_uint32_t num_calls;
   if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
     // Don't die twice - run a busy loop.
-    while (1) { }
+    while (1) {
+      internal_sched_yield();
+    }
   }
   if (common_flags()->print_module_map >= 1)
     DumpProcessMap();
@@ -85,12 +88,8 @@ void ShowStatsAndAbort() {
 NOINLINE
 static void ReportGenericErrorWrapper(uptr addr, bool is_write, int size,
                                       int exp_arg, bool fatal) {
-  if (__asan_test_only_reported_buggy_pointer) {
-    *__asan_test_only_reported_buggy_pointer = addr;
-  } else {
-    GET_CALLER_PC_BP_SP;
-    ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
-  }
+  GET_CALLER_PC_BP_SP;
+  ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
 }
 
 // --------------- LowLevelAllocateCallbac ---------- {{{1
@@ -150,11 +149,11 @@ ASAN_REPORT_ERROR_N(store, true)
 
 #define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
   uptr sp = MEM_TO_SHADOW(addr);                                               \
-  uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp)            \
-                                      : *reinterpret_cast<u16 *>(sp);          \
+  uptr s = size <= ASAN_SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp)       \
+                                           : *reinterpret_cast<u16 *>(sp);     \
   if (UNLIKELY(s)) {                                                           \
-    if (UNLIKELY(size >= SHADOW_GRANULARITY ||                                 \
-                 ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >=       \
+    if (UNLIKELY(size >= ASAN_SHADOW_GRANULARITY ||                            \
+                 ((s8)((addr & (ASAN_SHADOW_GRANULARITY - 1)) + size - 1)) >=  \
                      (s8)s)) {                                                 \
       ReportGenericErrorWrapper(addr, is_write, size, exp_arg, fatal);         \
     }                                                                          \
|
||||
extern "C"
|
||||
NOINLINE INTERFACE_ATTRIBUTE
|
||||
void __asan_loadN(uptr addr, uptr size) {
|
||||
if (__asan_region_is_poisoned(addr, size)) {
|
||||
if ((addr = __asan_region_is_poisoned(addr, size))) {
|
||||
GET_CALLER_PC_BP_SP;
|
||||
ReportGenericError(pc, bp, sp, addr, false, size, 0, true);
|
||||
}
|
||||
@ -197,7 +196,7 @@ void __asan_loadN(uptr addr, uptr size) {
|
||||
extern "C"
|
||||
NOINLINE INTERFACE_ATTRIBUTE
|
||||
void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
|
||||
if (__asan_region_is_poisoned(addr, size)) {
|
||||
if ((addr = __asan_region_is_poisoned(addr, size))) {
|
||||
GET_CALLER_PC_BP_SP;
|
||||
ReportGenericError(pc, bp, sp, addr, false, size, exp, true);
|
||||
}
|
||||
@ -206,7 +205,7 @@ void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
|
||||
extern "C"
|
||||
NOINLINE INTERFACE_ATTRIBUTE
|
||||
void __asan_loadN_noabort(uptr addr, uptr size) {
|
||||
if (__asan_region_is_poisoned(addr, size)) {
|
||||
if ((addr = __asan_region_is_poisoned(addr, size))) {
|
||||
GET_CALLER_PC_BP_SP;
|
||||
ReportGenericError(pc, bp, sp, addr, false, size, 0, false);
|
||||
}
|
||||
@ -215,7 +214,7 @@ void __asan_loadN_noabort(uptr addr, uptr size) {
|
||||
extern "C"
|
||||
NOINLINE INTERFACE_ATTRIBUTE
|
||||
void __asan_storeN(uptr addr, uptr size) {
|
||||
if (__asan_region_is_poisoned(addr, size)) {
|
||||
if ((addr = __asan_region_is_poisoned(addr, size))) {
|
||||
GET_CALLER_PC_BP_SP;
|
||||
ReportGenericError(pc, bp, sp, addr, true, size, 0, true);
|
||||
}
|
||||
@ -224,7 +223,7 @@ void __asan_storeN(uptr addr, uptr size) {
|
||||
extern "C"
|
||||
NOINLINE INTERFACE_ATTRIBUTE
|
||||
void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
|
||||
if (__asan_region_is_poisoned(addr, size)) {
|
||||
if ((addr = __asan_region_is_poisoned(addr, size))) {
|
||||
GET_CALLER_PC_BP_SP;
|
||||
ReportGenericError(pc, bp, sp, addr, true, size, exp, true);
|
||||
}
|
||||
@ -233,7 +232,7 @@ void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
|
||||
extern "C"
|
||||
NOINLINE INTERFACE_ATTRIBUTE
|
||||
void __asan_storeN_noabort(uptr addr, uptr size) {
|
||||
if (__asan_region_is_poisoned(addr, size)) {
|
||||
if ((addr = __asan_region_is_poisoned(addr, size))) {
|
||||
GET_CALLER_PC_BP_SP;
|
||||
ReportGenericError(pc, bp, sp, addr, true, size, 0, false);
|
||||
}
|
||||
@ -313,7 +312,7 @@ static void InitializeHighMemEnd() {
|
||||
kHighMemEnd = GetMaxUserVirtualAddress();
|
||||
// Increase kHighMemEnd to make sure it's properly
|
||||
// aligned together with kHighMemBeg:
|
||||
kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
|
||||
kHighMemEnd |= (GetMmapGranularity() << ASAN_SHADOW_SCALE) - 1;
|
||||
#endif // !ASAN_FIXED_MAPPING
|
||||
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
|
||||
}
|
||||
@ -365,29 +364,16 @@ void PrintAddressSpaceLayout() {
|
||||
Printf("malloc_context_size=%zu\n",
|
||||
(uptr)common_flags()->malloc_context_size);
|
||||
|
||||
Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
|
||||
Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
|
||||
Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
|
||||
CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
|
||||
Printf("SHADOW_SCALE: %d\n", (int)ASAN_SHADOW_SCALE);
|
||||
Printf("SHADOW_GRANULARITY: %d\n", (int)ASAN_SHADOW_GRANULARITY);
|
||||
Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)ASAN_SHADOW_OFFSET);
|
||||
CHECK(ASAN_SHADOW_SCALE >= 3 && ASAN_SHADOW_SCALE <= 7);
|
||||
if (kMidMemBeg)
|
||||
CHECK(kMidShadowBeg > kLowShadowEnd &&
|
||||
kMidMemBeg > kMidShadowEnd &&
|
||||
kHighShadowBeg > kMidMemEnd);
|
||||
}
|
||||
|
||||
#if defined(__thumb__) && defined(__linux__)
|
||||
#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
|
||||
#endif
|
||||
|
||||
#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
|
||||
static bool UNUSED __local_asan_dyninit = [] {
|
||||
MaybeStartBackgroudThread();
|
||||
SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
|
||||
|
||||
return false;
|
||||
}();
|
||||
#endif
|
||||
|
||||
static void AsanInitInternal() {
|
||||
if (LIKELY(asan_inited)) return;
|
||||
SanitizerToolName = "AddressSanitizer";
|
||||
@ -438,7 +424,7 @@ static void AsanInitInternal() {
|
||||
MaybeReexec();
|
||||
|
||||
// Setup internal allocator callback.
|
||||
SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
|
||||
SetLowLevelAllocateMinAlignment(ASAN_SHADOW_GRANULARITY);
|
||||
SetLowLevelAllocateCallback(OnLowLevelAllocate);
|
||||
|
||||
InitializeAsanInterceptors();
|
||||
@ -462,10 +448,8 @@ static void AsanInitInternal() {
|
||||
allocator_options.SetFrom(flags(), common_flags());
|
||||
InitializeAllocator(allocator_options);
|
||||
|
||||
#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
|
||||
MaybeStartBackgroudThread();
|
||||
SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
|
||||
#endif
|
||||
if (SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL)
|
||||
MaybeStartBackgroudThread();
|
||||
|
||||
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
|
||||
// should be set to 1 prior to initializing the threads.
|
||||
@ -493,12 +477,7 @@ static void AsanInitInternal() {
|
||||
|
||||
if (CAN_SANITIZE_LEAKS) {
|
||||
__lsan::InitCommonLsan();
|
||||
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
|
||||
if (flags()->halt_on_error)
|
||||
Atexit(__lsan::DoLeakCheck);
|
||||
else
|
||||
Atexit(__lsan::DoRecoverableLeakCheckVoid);
|
||||
}
|
||||
InstallAtExitCheckLeaks();
|
||||
}
|
||||
|
||||
#if CAN_SANITIZE_UB
|
||||
@ -561,7 +540,7 @@ void UnpoisonStack(uptr bottom, uptr top, const char *type) {
|
||||
top - bottom);
|
||||
return;
|
||||
}
|
||||
PoisonShadow(bottom, RoundUpTo(top - bottom, SHADOW_GRANULARITY), 0);
|
||||
PoisonShadow(bottom, RoundUpTo(top - bottom, ASAN_SHADOW_GRANULARITY), 0);
|
||||
}
|
||||
|
||||
static void UnpoisonDefaultStack() {
|
||||
|
36
libsanitizer/asan/asan_rtl_static.cpp
Normal file
36
libsanitizer/asan/asan_rtl_static.cpp
Normal file
@ -0,0 +1,36 @@
|
||||
//===-- asan_static_rtl.cpp -----------------------------------------------===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of AddressSanitizer, an address sanity checker.
|
||||
//
|
||||
// Main file of the ASan run-time library.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
// This file is empty for now. Main reason to have it is workaround for Windows
|
||||
// build, which complains because no files are part of the asan_static lib.
|
||||
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
|
||||
#define REPORT_FUNCTION(Name) \
|
||||
extern "C" SANITIZER_WEAK_ATTRIBUTE void Name(__asan::uptr addr); \
|
||||
extern "C" void Name##_asm(uptr addr) { Name(addr); }
|
||||
|
||||
namespace __asan {
|
||||
|
||||
REPORT_FUNCTION(__asan_report_load1)
|
||||
REPORT_FUNCTION(__asan_report_load2)
|
||||
REPORT_FUNCTION(__asan_report_load4)
|
||||
REPORT_FUNCTION(__asan_report_load8)
|
||||
REPORT_FUNCTION(__asan_report_load16)
|
||||
REPORT_FUNCTION(__asan_report_store1)
|
||||
REPORT_FUNCTION(__asan_report_store2)
|
||||
REPORT_FUNCTION(__asan_report_store4)
|
||||
REPORT_FUNCTION(__asan_report_store8)
|
||||
REPORT_FUNCTION(__asan_report_store16)
|
||||
|
||||
} // namespace __asan
|
146
libsanitizer/asan/asan_rtl_x86_64.S
Normal file
146
libsanitizer/asan/asan_rtl_x86_64.S
Normal file
@ -0,0 +1,146 @@
|
||||
#include "asan_mapping.h"
|
||||
#include "sanitizer_common/sanitizer_asm.h"
|
||||
|
||||
#if defined(__x86_64__)
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
|
||||
.file "asan_rtl_x86_64.S"
|
||||
|
||||
#define NAME(n, reg, op, s, i) n##_##op##_##i##_##s##_##reg
|
||||
|
||||
#define FNAME(reg, op, s, i) NAME(__asan_check, reg, op, s, i)
|
||||
#define RLABEL(reg, op, s, i) NAME(.return, reg, op, s, i)
|
||||
#define CLABEL(reg, op, s, i) NAME(.check, reg, op, s, i)
|
||||
#define FLABEL(reg, op, s, i) NAME(.fail, reg, op, s, i)
|
||||
|
||||
#define BEGINF(reg, op, s, i) \
|
||||
.section .text.FNAME(reg, op, s, i),"ax",@progbits ;\
|
||||
.globl FNAME(reg, op, s, i) ;\
|
||||
.hidden FNAME(reg, op, s, i) ;\
|
||||
ASM_TYPE_FUNCTION(FNAME(reg, op, s, i)) ;\
|
||||
.cfi_startproc ;\
|
||||
FNAME(reg, op, s, i): ;\
|
||||
|
||||
#define ENDF .cfi_endproc ;\
|
||||
|
||||
// Access check functions for 1,2 and 4 byte types, which require extra checks.
|
||||
#define ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, s) \
|
||||
mov %##reg,%r10 ;\
|
||||
shr $0x3,%r10 ;\
|
||||
movsbl ASAN_SHADOW_OFFSET_CONST(%r10),%r10d ;\
|
||||
test %r10d,%r10d ;\
|
||||
jne CLABEL(reg, op, s, add) ;\
|
||||
RLABEL(reg, op, s, add): ;\
|
||||
retq ;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, i) \
|
||||
CLABEL(reg, op, 1, i): ;\
|
||||
push %rcx ;\
|
||||
mov %##reg,%rcx ;\
|
||||
and $0x7,%ecx ;\
|
||||
cmp %r10d,%ecx ;\
|
||||
pop %rcx ;\
|
||||
jl RLABEL(reg, op, 1, i);\
|
||||
mov %##reg,%rdi ;\
|
||||
jmp __asan_report_##op##1_asm ;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, i) \
|
||||
CLABEL(reg, op, 2, i): ;\
|
||||
push %rcx ;\
|
||||
mov %##reg,%rcx ;\
|
||||
and $0x7,%ecx ;\
|
||||
add $0x1,%ecx ;\
|
||||
cmp %r10d,%ecx ;\
|
||||
pop %rcx ;\
|
||||
jl RLABEL(reg, op, 2, i);\
|
||||
mov %##reg,%rdi ;\
|
||||
jmp __asan_report_##op##2_asm ;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, i) \
|
||||
CLABEL(reg, op, 4, i): ;\
|
||||
push %rcx ;\
|
||||
mov %##reg,%rcx ;\
|
||||
and $0x7,%ecx ;\
|
||||
add $0x3,%ecx ;\
|
||||
cmp %r10d,%ecx ;\
|
||||
pop %rcx ;\
|
||||
jl RLABEL(reg, op, 4, i);\
|
||||
mov %##reg,%rdi ;\
|
||||
jmp __asan_report_##op##4_asm ;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, op) \
|
||||
BEGINF(reg, op, 1, add) ;\
|
||||
ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 1) ;\
|
||||
ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, add) ;\
|
||||
ENDF
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, op) \
|
||||
BEGINF(reg, op, 2, add) ;\
|
||||
ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 2) ;\
|
||||
ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, add) ;\
|
||||
ENDF
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, op) \
|
||||
BEGINF(reg, op, 4, add) ;\
|
||||
ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 4) ;\
|
||||
ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, add) ;\
|
||||
ENDF
|
||||
|
||||
// Access check functions for 8 and 16 byte types: no extra checks required.
|
||||
#define ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, s, c) \
|
||||
mov %##reg,%r10 ;\
|
||||
shr $0x3,%r10 ;\
|
||||
##c $0x0,ASAN_SHADOW_OFFSET_CONST(%r10) ;\
|
||||
jne FLABEL(reg, op, s, add) ;\
|
||||
retq ;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_FAIL(reg, op, s, i) \
|
||||
FLABEL(reg, op, s, i): ;\
|
||||
mov %##reg,%rdi ;\
|
||||
jmp __asan_report_##op##s##_asm;\
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, op) \
|
||||
BEGINF(reg, op, 8, add) ;\
|
||||
ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 8, cmpb) ;\
|
||||
ASAN_MEMORY_ACCESS_FAIL(reg, op, 8, add) ;\
|
||||
ENDF
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, op) \
|
||||
BEGINF(reg, op, 16, add) ;\
|
||||
ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 16, cmpw) ;\
|
||||
ASAN_MEMORY_ACCESS_FAIL(reg, op, 16, add) ;\
|
||||
ENDF
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_CALLBACKS_ADD(reg) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, load) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, store) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, load) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, store) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, load) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, store) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, load) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, store) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, load) \
|
||||
ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, store) \
|
||||
|
||||
|
||||
// Instantiate all but R10 and R11 callbacks. We are using PLTSafe class with
|
||||
// the intrinsic, which guarantees that the code generation will never emit
|
||||
// R10 or R11 callback.
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RAX)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBX)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RCX)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDX)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RSI)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDI)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBP)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R8)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R9)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R12)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R13)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R14)
|
||||
ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R15)
|
||||
|
||||
#endif
|
||||
|
||||
NO_EXEC_STACK_DIRECTIVE
|
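The new assembly callbacks above implement the same check the compiler otherwise inlines. A hedged C rendering of the 1/2/4-byte path (the 8/16-byte path merely compares the shadow byte or word against zero); kShadowOffset is an illustrative stand-in for ASAN_SHADOW_OFFSET_CONST:

// Returns true if the access should be reported; mirrors the asm above.
static bool AsanSmallAccessFails(unsigned long addr, unsigned long size) {
  signed char shadow = *(signed char *)((addr >> 3) + kShadowOffset);
  if (shadow == 0)
    return false;                 // whole 8-byte granule is addressable
  // Partially addressable granule: shadow holds the count of valid bytes,
  // so the access is bad if its last byte reaches past that prefix.
  return (long)((addr & 7) + size - 1) >= (long)shadow;
}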
@ -83,8 +83,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
thread->start_routine_ = start_routine;
thread->arg_ = arg;
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
parent_tid, &args);
asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);

return thread;
}
@ -306,7 +305,7 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
uptr stack_size = 0;
GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
&tls_begin_, &tls_size);
stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY);
stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();

@ -322,11 +321,9 @@ void AsanThread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_) {
uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
tls_end_ - tls_begin_aligned,
tls_end_aligned - tls_end_, 0);
uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
}
}

@ -347,27 +344,27 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
return true;
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);

while (shadow_ptr >= shadow_bottom &&
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
mem_ptr -= SHADOW_GRANULARITY;
mem_ptr -= ASAN_SHADOW_GRANULARITY;
}

while (shadow_ptr >= shadow_bottom &&
*shadow_ptr == kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
mem_ptr -= SHADOW_GRANULARITY;
mem_ptr -= ASAN_SHADOW_GRANULARITY;
}

if (shadow_ptr < shadow_bottom) {
return false;
}

uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
CHECK(ptr[0] == kCurrentStackFrameMagic);
access->offset = addr - (uptr)ptr;
access->frame_pc = ptr[2];

@ -1,4 +1,5 @@
//===-- asan_win.cpp ------------------------------------------------------===//
//===-- asan_win.cpp
//------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -13,21 +14,20 @@

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
# define WIN32_LEAN_AND_MEAN
# include <stdlib.h>
# include <windows.h>

#include <stdlib.h>

#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_win.h"
#include "sanitizer_common/sanitizer_win_defs.h"
# include "asan_interceptors.h"
# include "asan_internal.h"
# include "asan_mapping.h"
# include "asan_report.h"
# include "asan_stack.h"
# include "asan_thread.h"
# include "sanitizer_common/sanitizer_libc.h"
# include "sanitizer_common/sanitizer_mutex.h"
# include "sanitizer_common/sanitizer_win.h"
# include "sanitizer_common/sanitizer_win_defs.h"

using namespace __asan;

@ -49,8 +49,8 @@ uptr __asan_get_shadow_memory_dynamic_address() {
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler;

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
extern "C" SANITIZER_INTERFACE_ATTRIBUTE long __asan_unhandled_exception_filter(
EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;

@ -187,6 +187,8 @@ void InitializePlatformInterceptors() {
}
}

void InstallAtExitCheckLeaks() {}

void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
@ -253,7 +255,7 @@ void *AsanDoesNotSupportStaticLinkage() {
}

uptr FindDynamicShadowStart() {
return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}


@ -56,6 +56,13 @@ INTERCEPT_WRAP_W_W(_expand_dbg)

// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cpp)

# if defined(_MSC_VER) && !defined(__clang__)
// Disable warnings such as: 'void memchr(void)': incorrect number of arguments
// for intrinsic function, expected '3' arguments.
# pragma warning(push)
# pragma warning(disable : 4392)
# endif

INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
INTERCEPT_LIBRARY_FUNCTION(frexp);
@ -87,6 +94,10 @@ INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
INTERCEPT_LIBRARY_FUNCTION(wcsnlen);

# if defined(_MSC_VER) && !defined(__clang__)
# pragma warning(pop)
# endif

#ifdef _WIN64
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
#else

@ -14,6 +14,12 @@
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H

#if defined(__linux__) && defined(__CET__)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif

#if defined(__APPLE__) && defined(__aarch64__)
#define SEPARATOR %%
#else

@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

@ -172,21 +172,6 @@ void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,

} // namespace __hwasan

#define HWASAN_MALLOC_HOOK(ptr, size) \
do { \
if (&__sanitizer_malloc_hook) { \
__sanitizer_malloc_hook(ptr, size); \
} \
RunMallocHooks(ptr, size); \
} while (false)
#define HWASAN_FREE_HOOK(ptr) \
do { \
if (&__sanitizer_free_hook) { \
__sanitizer_free_hook(ptr); \
} \
RunFreeHooks(ptr); \
} while (false)

#if HWASAN_WITH_INTERCEPTORS
// For both bionic and glibc __sigset_t is an unsigned long.
typedef unsigned long __hw_sigset_t;

@ -132,6 +132,11 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
}
if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
return nullptr;
ReportRssLimitExceeded(stack);
}

alignment = Max(alignment, kShadowAlignment);
uptr size = TaggedSize(orig_size);
@ -194,7 +199,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
}

HWASAN_MALLOC_HOOK(user_ptr, size);
RunMallocHooks(user_ptr, size);
return user_ptr;
}

@ -221,7 +226,7 @@ static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,

static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
CHECK(tagged_ptr);
HWASAN_FREE_HOOK(tagged_ptr);
RunFreeHooks(tagged_ptr);

bool in_taggable_region =
InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));

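The hunks above drop the weak __sanitizer_malloc_hook indirection and call RunMallocHooks/RunFreeHooks directly; user code keeps working through the installable-hook API. A minimal usage sketch (output text is illustrative):

#include <sanitizer/allocator_interface.h>
#include <stdio.h>
#include <stdlib.h>

static void MyMallocHook(const volatile void *ptr, size_t size) {
  (void)ptr;
  fprintf(stderr, "alloc of %zu bytes\n", size);  // fires via RunMallocHooks
}
static void MyFreeHook(const volatile void *ptr) {
  (void)ptr;
  fprintf(stderr, "free\n");                      // fires via RunFreeHooks
}

int main(void) {
  __sanitizer_install_malloc_and_free_hooks(MyMallocHook, MyFreeHook);
  free(malloc(16));  // triggers both hooks under an installed sanitizer runtime
  return 0;
}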
@ -47,6 +47,12 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
return res;
}

INTERCEPTOR(int, pthread_join, void *t, void **arg) {
return REAL(pthread_join)(t, arg);
}

DEFINE_REAL_PTHREAD_FUNCTIONS

DEFINE_REAL(int, vfork)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)

@ -189,7 +195,8 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(vfork);
#endif // __linux__
INTERCEPT_FUNCTION(pthread_create);
#endif
INTERCEPT_FUNCTION(pthread_join);
# endif

inited = 1;
}

@ -40,5 +40,5 @@ void *__hwasan_memmove(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size);
return memmove(UntagPtr(to), UntagPtr(from), size);
return memmove(to, from, size);
}

@ -22,21 +22,23 @@
#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE

// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res
#define OPERATOR_NEW_ALIGN_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_aligned_alloc(static_cast<uptr>(align), size, &stack); \
if (!nothrow && UNLIKELY(!res)) \
ReportOutOfMemory(size, &stack); \
return res
# define OPERATOR_NEW_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_malloc(size, &stack); \
if (!nothrow && UNLIKELY(!res)) \
ReportOutOfMemory(size, &stack); \
return res
# define OPERATOR_NEW_ALIGN_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_memalign(static_cast<uptr>(align), size, &stack); \
if (!nothrow && UNLIKELY(!res)) \
ReportOutOfMemory(size, &stack); \
return res

#define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
if (ptr) hwasan_free(ptr, &stack)
# define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
if (ptr) \
hwasan_free(ptr, &stack)

#elif defined(__ANDROID__)

@ -44,8 +46,8 @@
// since we previously released a runtime that intercepted these functions,
// removing the interceptors would break ABI. Therefore we simply forward to
// malloc and free.
#define OPERATOR_NEW_BODY(nothrow) return malloc(size)
#define OPERATOR_DELETE_BODY free(ptr)
# define OPERATOR_NEW_BODY(nothrow) return malloc(size)
# define OPERATOR_DELETE_BODY free(ptr)

#endif

@ -55,26 +57,27 @@ using namespace __hwasan;

// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
struct nothrow_t {};
} // namespace std



INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&) {
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(size_t size) {
OPERATOR_NEW_BODY(false /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size) {
OPERATOR_NEW_BODY(false /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
size_t size, std::nothrow_t const &) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) {
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size, std::nothrow_t const &) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}

INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(void *ptr)
NOEXCEPT {
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](

23 libsanitizer/hwasan/hwasan_preinit.cpp Normal file
@ -0,0 +1,23 @@
//===-- hwasan_preinit.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer, an address sanity checker.
//
// Call __hwasan_init at the very early stage of process startup.
//===----------------------------------------------------------------------===//
#include "hwasan_interface_internal.h"
#include "sanitizer_common/sanitizer_internal_defs.h"

#if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_hwasan_preinit, because it's not intended to
// be exported.
// This code linked into the main executable when -fsanitize=hwaddress is in
// the link flags. It can only use exported interface functions.
__attribute__((section(".preinit_array"), used)) static void (
*__local_hwasan_preinit)(void) = __hwasan_init;
#endif

@ -211,6 +211,15 @@ void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
char *out_buf, size_t out_buf_size);
// Determine the return address.
#if !defined(_MSC_VER) || defined(__clang__)
#define __sanitizer_return_address() \
__builtin_extract_return_addr(__builtin_return_address(0))
#else
extern "C" void *_ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)
#define __sanitizer_return_address() _ReturnAddress()
#endif
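The macro just added gives a portable way to take the caller's pc (MSVC lacks __builtin_return_address). A hedged usage sketch, combined with the symbolizer entry point declared above; buffer size is arbitrary:

void LogCaller() {
  char buf[256];
  __sanitizer_symbolize_pc(__sanitizer_return_address(), "%p %F %L", buf,
                           sizeof(buf));
  // buf now describes the call site of LogCaller(), e.g. "pc function file:line".
}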

/// Sets the callback to be called immediately before death on error.
///

@ -27,6 +27,10 @@ typedef uint32_t dfsan_origin;
/// Signature of the callback argument to dfsan_set_write_callback().
typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);

/// Signature of the callback argument to dfsan_set_conditional_callback().
typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
dfsan_origin origin);

/// Computes the union of \c l1 and \c l2, resulting in a union label.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);

@ -54,6 +58,10 @@ dfsan_origin dfsan_get_origin(long data);
/// Retrieves the label associated with the data at the given address.
dfsan_label dfsan_read_label(const void *addr, size_t size);

/// Return the origin associated with the first taint byte in the size bytes
/// from the address addr.
dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, size_t size);

/// Returns whether the given label label contains the label elem.
int dfsan_has_label(dfsan_label label, dfsan_label elem);

@ -70,6 +78,19 @@ void dfsan_flush(void);
/// callback executes. Pass in NULL to remove any callback.
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);

/// Sets a callback to be invoked on any conditional expressions which have a
/// taint label set. This can be used to find where tainted data influences
/// the behavior of the program.
/// These callbacks will only be added when -dfsan-conditional-callbacks=true.
void dfsan_set_conditional_callback(dfsan_conditional_callback_t callback);

/// Conditional expressions occur during signal handlers.
/// Making callbacks that handle signals well is tricky, so when
/// -dfsan-conditional-callbacks=true, conditional expressions used in signal
/// handlers will add the labels they see into a global (bitwise-or together).
/// This function returns all label bits seen in signal handler conditions.
dfsan_label dfsan_get_labels_in_signal_conditional();

/// Interceptor hooks.
/// Whenever a dfsan's custom function is called the corresponding
/// hook is called it non-zero. The hooks should be defined by the user.
@ -87,6 +108,9 @@ void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
/// prints description at the beginning of the trace. If origin tracking is not
/// on, or the address is not labeled, it prints nothing.
void dfsan_print_origin_trace(const void *addr, const char *description);
/// As above, but use an origin id from dfsan_get_origin() instead of address.
/// Does not include header line with taint label and address information.
void dfsan_print_origin_id_trace(dfsan_origin origin);

/// Prints the origin trace of the label at the address \p addr to a
/// pre-allocated output buffer. If origin tracking is not on, or the address is
@ -124,6 +148,10 @@ void dfsan_print_origin_trace(const void *addr, const char *description);
/// return value is not less than \p out_buf_size.
size_t dfsan_sprint_origin_trace(const void *addr, const char *description,
char *out_buf, size_t out_buf_size);
/// As above, but use an origin id from dfsan_get_origin() instead of address.
/// Does not include header line with taint label and address information.
size_t dfsan_sprint_origin_id_trace(dfsan_origin origin, char *out_buf,
size_t out_buf_size);

/// Prints the stack trace leading to this call to a pre-allocated output
/// buffer.

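A usage sketch for the conditional-callback API documented above; built with -fsanitize=dataflow and the -dfsan-conditional-callbacks=true flag named in the doc comment (the program body is illustrative):

#include <sanitizer/dfsan_interface.h>
#include <stdio.h>

static void OnTaintedCond(dfsan_label label, dfsan_origin origin) {
  (void)origin;
  fprintf(stderr, "tainted conditional, label %u\n", (unsigned)label);
}

int main(void) {
  int x = 1;
  dfsan_set_conditional_callback(OnTaintedCond);
  dfsan_set_label(1, &x, sizeof(x));  // taint x with label 1
  if (x > 0)                          // tainted condition fires the callback
    puts("positive");
  return 0;
}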
@ -401,6 +401,7 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
// The following prologues cannot be patched because of the short jump
// jumping to the patching region.

#if SANITIZER_WINDOWS64
// ntdll!wcslen in Win11
// 488bc1 mov rax,rcx
// 0fb710 movzx edx,word ptr [rax]
@ -422,6 +423,7 @@ static const u8 kPrologueWithShortJump2[] = {
0x4c, 0x8b, 0xc1, 0x8a, 0x01, 0x48, 0xff, 0xc1,
0x84, 0xc0, 0x75, 0xf7,
};
#endif

// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
@ -602,6 +604,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
case 0x247c8948: // 48 89 7c 24 XX : mov QWORD PTR [rsp + XX], rdi
case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx
case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9

@ -13,11 +13,12 @@

#include "lsan.h"

#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"

bool lsan_inited;
bool lsan_init_is_running;
@ -99,9 +100,7 @@ extern "C" void __lsan_init() {
InitializeThreadRegistry();
InstallDeadlySignalHandlers(LsanOnDeadlySignal);
InitializeMainThread();

if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
InstallAtExitCheckLeaks();

InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);


@ -13,17 +13,17 @@

#include "lsan_thread.h"
#if SANITIZER_POSIX
#include "lsan_posix.h"
# include "lsan_posix.h"
#elif SANITIZER_FUCHSIA
#include "lsan_fuchsia.h"
# include "lsan_fuchsia.h"
#endif
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

#define GET_STACK_TRACE(max_size, fast) \
__sanitizer::BufferedStackTrace stack; \
stack.Unwind(StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), nullptr, fast, max_size);
#define GET_STACK_TRACE(max_size, fast) \
__sanitizer::BufferedStackTrace stack; \
stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, fast, \
max_size);

#define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
@ -39,12 +39,14 @@ namespace __lsan {
void InitializeInterceptors();
void ReplaceSystemMalloc();
void LsanOnDeadlySignal(int signo, void *siginfo, void *context);
void InstallAtExitCheckLeaks();

#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
__lsan_init(); \
} while (0)
#define ENSURE_LSAN_INITED \
do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
__lsan_init(); \
} while (0)

} // namespace __lsan


@ -27,11 +27,11 @@ extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif

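The UL to ULL switch above matters on ILP32 targets, where unsigned long is only 32 bits and the widest of these constants would wrap before it ever reached uptr. A worked check (hedged illustration, not part of the merge):

// sizeof(unsigned long) == 4 on ILP32, so the shift wraps:
//   8UL  << 30 == 0x200000000 mod 2^32 == 0   (the limit silently becomes 0)
//   8ULL << 30 == 0x200000000                 (8 GiB, as intended)
static_assert(sizeof(1ULL) >= 8, "ULL is at least 64 bits on every target");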
static Allocator allocator;
@ -88,6 +88,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
size = 1;
if (size > max_malloc_size)
return ReportAllocationSizeTooBig(size, stack);
if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
return nullptr;
ReportRssLimitExceeded(&stack);
}
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
if (UNLIKELY(!p)) {
SetAllocatorOutOfMemory();
@ -99,7 +104,6 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
RunMallocHooks(p, size);
return p;
}
@ -115,7 +119,6 @@ static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
}

void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
allocator.Deallocate(GetAllocatorCache(), p);
@ -359,16 +362,4 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
#endif
} // extern "C"

@ -66,12 +66,9 @@ template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#elif defined(__x86_64__) || defined(__powerpc64__) || defined(__s390x__)
# if SANITIZER_FUCHSIA
# if SANITIZER_FUCHSIA || defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# elif defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
#elif defined(__s390x__)
const uptr kAllocatorSpace = 0x40000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.

@ -34,7 +34,6 @@ Mutex global_mutex;

Flags lsan_flags;


void DisableCounterUnderflow() {
if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
@ -43,44 +42,48 @@ void DisableCounterUnderflow() {
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
# include "lsan_flags.inc"
# undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
# include "lsan_flags.inc"
# undef LSAN_FLAG
}

#define LOG_POINTERS(...) \
do { \
if (flags()->log_pointers) Report(__VA_ARGS__); \
} while (0)
# define LOG_POINTERS(...) \
do { \
if (flags()->log_pointers) \
Report(__VA_ARGS__); \
} while (0)

#define LOG_THREADS(...) \
do { \
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0)
# define LOG_THREADS(...) \
do { \
if (flags()->log_threads) \
Report(__VA_ARGS__); \
} while (0)

class LeakSuppressionContext {
bool parsed = false;
SuppressionContext context;
bool suppressed_stacks_sorted = true;
InternalMmapVector<u32> suppressed_stacks;
const LoadedModule *suppress_module = nullptr;

Suppression *GetSuppressionForAddr(uptr addr);
void LazyInit();
Suppression *GetSuppressionForAddr(uptr addr);
bool SuppressInvalid(const StackTrace &stack);
bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

public:
LeakSuppressionContext(const char *supprression_types[],
int suppression_types_num)
: context(supprression_types, suppression_types_num) {}

Suppression *GetSuppressionForStack(u32 stack_trace_id,
const StackTrace &stack);
bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
if (!suppressed_stacks_sorted) {
@ -95,17 +98,17 @@ class LeakSuppressionContext {
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// definition.
"leak:*pthread_exit*\n"
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
# endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_MAC
// For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
"leak:*_os_trace*\n"
#endif
# endif
// TLS leak in some glibc versions, described in
// https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
"leak:*tls_get_addr*\n";
@ -123,9 +126,93 @@ void LeakSuppressionContext::LazyInit() {
if (&__lsan_default_suppressions)
context.Parse(__lsan_default_suppressions());
context.Parse(kStdSuppressions);
if (flags()->use_tls && flags()->use_ld_allocations)
suppress_module = GetLinker();
}
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr;

// Suppress by module name.
const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
if (!module_name)
module_name = "<unknown module>";
if (context.Match(module_name, kSuppressionLeak, &s))
return s;

// Suppress by file or function name.
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
context.Match(cur->info.file, kSuppressionLeak, &s)) {
break;
}
}
frames->ClearAll();
return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
// The top frame is our malloc/calloc/etc. The next frame is the caller.
if (stack.size >= 2)
return stack.trace[1];
return 0;
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
uptr caller_pc = GetCallerPC(stack);
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
// it as reachable, as we can't properly report its allocation stack anyway.
return !caller_pc ||
(suppress_module && suppress_module->containsAddress(caller_pc));
}

|
||||
uptr hit_count, uptr total_size) {
|
||||
for (uptr i = 0; i < stack.size; i++) {
|
||||
Suppression *s = GetSuppressionForAddr(
|
||||
StackTrace::GetPreviousInstructionPc(stack.trace[i]));
|
||||
if (s) {
|
||||
s->weight += total_size;
|
||||
atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
|
||||
uptr total_size) {
|
||||
LazyInit();
|
||||
StackTrace stack = StackDepotGet(stack_trace_id);
|
||||
if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
|
||||
return false;
|
||||
suppressed_stacks_sorted = false;
|
||||
suppressed_stacks.push_back(stack_trace_id);
|
||||
return true;
|
||||
}
|
||||
|
||||
static LeakSuppressionContext *GetSuppressionContext() {
|
||||
CHECK(suppression_ctx);
|
||||
return suppression_ctx;
|
||||
@ -146,9 +233,9 @@ void InitCommonLsan() {
|
||||
}
|
||||
}
|
||||
|
||||
class Decorator: public __sanitizer::SanitizerCommonDecorator {
|
||||
class Decorator : public __sanitizer::SanitizerCommonDecorator {
|
||||
public:
|
||||
Decorator() : SanitizerCommonDecorator() { }
|
||||
Decorator() : SanitizerCommonDecorator() {}
|
||||
const char *Error() { return Red(); }
|
||||
const char *Leak() { return Blue(); }
|
||||
};
|
||||
@ -157,19 +244,19 @@ static inline bool CanBeAHeapPointer(uptr p) {
|
||||
// Since our heap is located in mmap-ed memory, we can assume a sensible lower
|
||||
// bound on heap addresses.
|
||||
const uptr kMinAddress = 4 * 4096;
|
||||
if (p < kMinAddress) return false;
|
||||
#if defined(__x86_64__)
|
||||
if (p < kMinAddress)
|
||||
return false;
|
||||
# if defined(__x86_64__)
|
||||
// Accept only canonical form user-space addresses.
|
||||
return ((p >> 47) == 0);
|
||||
#elif defined(__mips64)
|
||||
# elif defined(__mips64)
|
||||
return ((p >> 40) == 0);
|
||||
#elif defined(__aarch64__)
|
||||
unsigned runtimeVMA =
|
||||
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
|
||||
# elif defined(__aarch64__)
|
||||
unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
|
||||
return ((p >> runtimeVMA) == 0);
|
||||
#else
|
||||
# else
|
||||
return true;
|
||||
#endif
|
||||
# endif
|
||||
}
|
||||
|
||||
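A worked instance of the x86-64 canonicality test above: user-space canonical addresses have bits 63..47 all zero, so the whole filter is one shift.

// 0x00007fffffffffff >> 47 == 0  -> plausible user-space heap pointer
// 0xffff800000000000 >> 47 != 0  -> kernel-half / non-canonical, rejected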
// Scans the memory range, looking for byte patterns that point into allocator
|
||||
@ -178,8 +265,7 @@ static inline bool CanBeAHeapPointer(uptr p) {
|
||||
// (|tag| = kReachable) and finding indirectly leaked chunks
|
||||
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
|
||||
// so |frontier| = 0.
|
||||
void ScanRangeForPointers(uptr begin, uptr end,
|
||||
Frontier *frontier,
|
||||
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
|
||||
const char *region_type, ChunkTag tag) {
|
||||
CHECK(tag == kReachable || tag == kIndirectlyLeaked);
|
||||
const uptr alignment = flags()->pointer_alignment();
|
||||
@ -190,13 +276,17 @@ void ScanRangeForPointers(uptr begin, uptr end,
|
||||
pp = pp + alignment - pp % alignment;
|
||||
for (; pp + sizeof(void *) <= end; pp += alignment) {
|
||||
void *p = *reinterpret_cast<void **>(pp);
|
||||
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
|
||||
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p)))
|
||||
continue;
|
||||
uptr chunk = PointsIntoChunk(p);
|
||||
if (!chunk) continue;
|
||||
if (!chunk)
|
||||
continue;
|
||||
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
|
||||
if (chunk == begin) continue;
|
||||
if (chunk == begin)
|
||||
continue;
|
||||
LsanMetadata m(chunk);
|
||||
if (m.tag() == kReachable || m.tag() == kIgnored) continue;
|
||||
if (m.tag() == kReachable || m.tag() == kIgnored)
|
||||
continue;
|
||||
|
||||
// Do this check relatively late so we can log only the interesting cases.
|
||||
if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
|
||||
@ -234,23 +324,23 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
|
||||
}
|
||||
}
|
||||
|
||||
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
|
||||
void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
|
||||
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
|
||||
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
|
||||
}
|
||||
|
||||
#if SANITIZER_FUCHSIA
|
||||
# if SANITIZER_FUCHSIA
|
||||
|
||||
// Fuchsia handles all threads together with its own callback.
|
||||
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
|
||||
|
||||
#else
|
||||
# else
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
# if SANITIZER_ANDROID
|
||||
// FIXME: Move this out into *libcdep.cpp
|
||||
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
|
||||
pid_t, void (*cb)(void *, void *, uptr, void *), void *);
|
||||
#endif
|
||||
# endif
|
||||
|
||||
static void ProcessThreadRegistry(Frontier *frontier) {
|
||||
InternalMmapVector<uptr> ptrs;
|
||||
@ -282,9 +372,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
LOG_THREADS("Processing thread %llu.\n", os_id);
|
||||
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
|
||||
DTLS *dtls;
|
||||
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
|
||||
&tls_begin, &tls_end,
|
||||
&cache_begin, &cache_end, &dtls);
|
||||
bool thread_found =
|
||||
GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
|
||||
&tls_end, &cache_begin, &cache_end, &dtls);
|
||||
if (!thread_found) {
|
||||
// If a thread can't be found in the thread registry, it's probably in the
|
||||
// process of destruction. Log this event and move on.
|
||||
@ -298,7 +388,8 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
Report("Unable to get registers from thread %llu.\n", os_id);
|
||||
// If unable to get SP, consider the entire stack to be reachable unless
|
||||
// GetRegistersAndSP failed with ESRCH.
|
||||
if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
|
||||
if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
|
||||
continue;
|
||||
sp = stack_begin;
|
||||
}
|
||||
|
||||
@ -353,7 +444,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
kReachable);
|
||||
}
|
||||
}
|
||||
#if SANITIZER_ANDROID
|
||||
# if SANITIZER_ANDROID
|
||||
auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
|
||||
void *arg) -> void {
|
||||
ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
|
||||
@ -366,7 +457,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
// thread is suspended in the middle of updating its DTLS. IOWs, we
|
||||
// could scan already freed memory. (probably fine for now)
|
||||
__libc_iterate_dynamic_tls(os_id, cb, frontier);
|
||||
#else
|
||||
# else
|
||||
if (dtls && !DTLSInDestruction(dtls)) {
|
||||
ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
|
||||
uptr dtls_beg = dtv.beg;
|
||||
@ -383,7 +474,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
// this and continue.
|
||||
LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
|
||||
}
|
||||
#endif
|
||||
# endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -391,13 +482,14 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
ProcessThreadRegistry(frontier);
|
||||
}
|
||||
|
||||
#endif // SANITIZER_FUCHSIA
|
||||
# endif // SANITIZER_FUCHSIA
|
||||
|
||||
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
|
||||
uptr region_begin, uptr region_end, bool is_readable) {
|
||||
uptr intersection_begin = Max(root_region.begin, region_begin);
|
||||
uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
|
||||
if (intersection_begin >= intersection_end) return;
|
||||
if (intersection_begin >= intersection_end)
|
||||
return;
|
||||
LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
|
||||
(void *)root_region.begin,
|
||||
(void *)(root_region.begin + root_region.size),
|
||||
@ -420,7 +512,8 @@ static void ProcessRootRegion(Frontier *frontier,
|
||||
|
||||
// Scans root regions for heap pointers.
|
||||
static void ProcessRootRegions(Frontier *frontier) {
|
||||
if (!flags()->use_root_regions) return;
|
||||
if (!flags()->use_root_regions)
|
||||
return;
|
||||
for (uptr i = 0; i < root_regions.size(); i++)
|
||||
ProcessRootRegion(frontier, root_regions[i]);
|
||||
}
|
||||
@ -477,68 +570,6 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
|
||||
}
|
||||
}
|
||||
|
||||
static uptr GetCallerPC(const StackTrace &stack) {
|
||||
// The top frame is our malloc/calloc/etc. The next frame is the caller.
|
||||
if (stack.size >= 2)
|
||||
return stack.trace[1];
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct InvalidPCParam {
|
||||
Frontier *frontier;
|
||||
bool skip_linker_allocations;
|
||||
};
|
||||
|
||||
// ForEachChunk callback. If the caller pc is invalid or is within the linker,
|
||||
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
|
||||
static void MarkInvalidPCCb(uptr chunk, void *arg) {
|
||||
CHECK(arg);
|
||||
InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
|
||||
chunk = GetUserBegin(chunk);
|
||||
LsanMetadata m(chunk);
|
||||
if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
|
||||
u32 stack_id = m.stack_trace_id();
|
||||
uptr caller_pc = 0;
|
||||
if (stack_id > 0)
|
||||
caller_pc = GetCallerPC(StackDepotGet(stack_id));
|
||||
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
|
||||
// it as reachable, as we can't properly report its allocation stack anyway.
|
||||
if (caller_pc == 0 || (param->skip_linker_allocations &&
|
||||
GetLinker()->containsAddress(caller_pc))) {
|
||||
m.set_tag(kReachable);
|
||||
param->frontier->push_back(chunk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
|
||||
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
|
||||
// modules accounting etc.
|
||||
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
|
||||
// They are allocated with a __libc_memalign() call in allocate_and_init()
|
||||
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
|
||||
// blocks, but we can make sure they come from our own allocator by intercepting
|
||||
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
|
||||
// addresses are stored in a dynamically allocated array (the DTV) which is
|
||||
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
|
||||
// being reachable from the static TLS, and the dynamic TLS being reachable from
|
||||
// the DTV. This is because the initial DTV is allocated before our interception
|
||||
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
|
||||
// can't special-case it either, since we don't know its size.
|
||||
// Our solution is to include in the root set all allocations made from
|
||||
// ld-linux.so (which is where allocate_and_init() is implemented). This is
|
||||
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
|
||||
// which we don't care about).
|
||||
// On all other platforms, this simply checks to ensure that the caller pc is
|
||||
// valid before reporting chunks as leaked.
|
||||
static void ProcessPC(Frontier *frontier) {
|
||||
InvalidPCParam arg;
|
||||
arg.frontier = frontier;
|
||||
arg.skip_linker_allocations =
|
||||
flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
|
||||
ForEachChunk(MarkInvalidPCCb, &arg);
|
||||
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier) {
@ -554,9 +585,6 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  CHECK_EQ(0, frontier->size());
  ProcessPC(frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
@ -583,14 +611,13 @@ static void ResetTagsCb(uptr chunk, void *arg) {
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
                                m.tag());
  }
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
@ -622,13 +649,13 @@ static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  }
}

#if SANITIZER_FUCHSIA
# if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#else // !SANITIZER_FUCHSIA
# else  // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
@ -642,7 +669,7 @@ static void ReportUnsuspendedThreads(
      &ReportIfNotSuspended, &threads);
}

#endif // !SANITIZER_FUCHSIA
# endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
@ -651,7 +678,7 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
@ -700,17 +727,20 @@ static bool CheckForLeaks() {
           "etc)\n");
    Die();
  }
  LeakReport leak_report;
  leak_report.AddLeakedChunks(param.leaks);

  // No new suppressions stacks, so rerun will not help and we can report.
  if (!param.leak_report.ApplySuppressions())
    return PrintResults(param.leak_report);
  if (!leak_report.ApplySuppressions())
    return PrintResults(leak_report);

  // No indirect leaks to report, so we are done here.
  if (!param.leak_report.IndirectUnsuppressedLeakCount())
    return PrintResults(param.leak_report);
  if (!leak_report.IndirectUnsuppressedLeakCount())
    return PrintResults(leak_report);

  if (i >= 8) {
    Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
    return PrintResults(param.leak_report);
    return PrintResults(leak_report);
  }

  // We found a new previously unseen suppressed call stack. Rerun to make
@ -726,10 +756,12 @@ bool HasReportedLeaks() { return has_reported_leaks; }
void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks) HandleLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
@ -740,80 +772,50 @@ static int DoRecoverableLeakCheck() {

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (context.Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

Suppression *LeakSuppressionContext::GetSuppressionForStack(
    u32 stack_trace_id, const StackTrace &stack) {
  LazyInit();
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      suppressed_stacks_sorted = false;
      suppressed_stacks.push_back(stack_trace_id);
      return s;
    }
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
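// Editor's note: one possible shape for the FIXME above (an assumption, not
// upstream code): deduplicate leaks through a hash map keyed by
// (stack_trace_id, is_directly_leaked), which turns the linear scan below into
// an amortized O(1) lookup and removes the need for kMaxLeaksConsidered.
// Self-contained illustration using the C++ standard library rather than the
// sanitizer-internal containers:
#include <cstdint>
#include <unordered_map>

struct LeakAggSketch {
  uint64_t hit_count = 0, total_size = 0;
};
static std::unordered_map<uint64_t, LeakAggSketch> leak_dedup_sketch;

static void AddLeakedChunkSketch(uint32_t stack_id, bool direct,
                                 uint64_t size) {
  uint64_t key = (uint64_t(stack_id) << 1) | uint64_t(direct);
  LeakAggSketch &agg = leak_dedup_sketch[key];  // O(1) amortized
  agg.hit_count += 1;
  agg.total_size += size;
}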

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

  if (u32 resolution = flags()->resolution) {
    StackTrace stack = StackDepotGet(stack_trace_id);
    stack.size = Min(stack.size, resolution);
    stack_trace_id = StackDepotPut(stack);
  }

  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++, /* hit_count */ 1,
                   leaked_size, stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

@ -828,9 +830,10 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
@ -838,10 +841,12 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
@ -880,9 +885,10 @@ void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
@ -894,12 +900,8 @@ uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = false;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = suppressions->GetSuppressionForStack(
        leaks_[i].stack_trace_id, StackDepotGet(leaks_[i].stack_trace_id));
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                              leaks_[i].hit_count);
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
@ -910,7 +912,8 @@ uptr LeakReport::ApplySuppressions() {
uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

@ -922,16 +925,16 @@ uptr LeakReport::IndirectUnsuppressedLeakCount() {
  return result;
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
}  // namespace __lsan
#else  // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

@ -948,11 +951,13 @@ void __lsan_ignore_object(const void *p) {
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
@ -962,7 +967,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
@ -988,7 +993,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
            begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
@ -1010,7 +1015,7 @@ void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
@ -1018,7 +1023,7 @@ int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

@ -1027,14 +1032,14 @@ SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
__lsan_default_suppressions() {
  return "";
}
#endif
} // extern "C"
}  // extern "C"

@ -33,21 +33,21 @@
// Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
// is missing. This caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
#define CAN_SANITIZE_LEAKS 0
# define CAN_SANITIZE_LEAKS 0
#elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
     defined(__powerpc64__) || defined(__s390x__))
#define CAN_SANITIZE_LEAKS 1
# define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
# define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
# define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
# define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1
# define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
# define CAN_SANITIZE_LEAKS 0
#endif

namespace __sanitizer {
@ -82,6 +82,15 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);

struct LeakedChunk {
  uptr chunk;
  u32 stack_trace_id;
  uptr leaked_size;
  ChunkTag tag;
};

using LeakedChunks = InternalMmapVector<LeakedChunk>;

struct Leak {
  u32 id;
  uptr hit_count;
@ -101,8 +110,7 @@ struct LeakedObject {
class LeakReport {
 public:
  LeakReport() {}
  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                      ChunkTag tag);
  void AddLeakedChunks(const LeakedChunks &chunks);
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  uptr ApplySuppressions();
@ -136,7 +144,7 @@ struct RootRegion {
// threads and enumerating roots.
struct CheckForLeaksParam {
  Frontier frontier;
  LeakReport leak_report;
  LeakedChunks leaks;
  bool success = false;
};

@ -222,8 +230,24 @@ void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

struct ScopedStopTheWorldLock {
  ScopedStopTheWorldLock() {
    LockThreadRegistry();
    LockAllocator();
  }

  ~ScopedStopTheWorldLock() {
    UnlockAllocator();
    UnlockThreadRegistry();
  }

  ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
  ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
};
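// Editor's note: the RAII guard above encodes the required ordering (thread
// registry locked before the allocator, unlocked in reverse), so the
// stop-the-world entry points in the hunks that follow reduce to this pattern:
//
//   void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
//                                 CheckForLeaksParam *argument) {
//     ScopedStopTheWorldLock lock;  // locks registry, then allocator
//     StopTheWorld(callback, argument);
//   }  // destructor unlocks allocator, then registry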

ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,

@ -52,14 +52,22 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
// behavior and causes rare race conditions.
void HandleLeaks() {}

// This is defined differently in asan_fuchsia.cpp and lsan_fuchsia.cpp.
bool UseExitcodeOnLeak();

int ExitHook(int status) {
  if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
    if (UseExitcodeOnLeak())
      DoLeakCheck();
    else
      DoRecoverableLeakCheckVoid();
  }
  return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status;
}

void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument) {
  LockThreadRegistry();
  LockAllocator();
  ScopedStopTheWorldLock lock;

  struct Params {
    InternalMmapVector<uptr> allocator_caches;
@ -149,9 +157,6 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
        params->callback(SuspendedThreadsListFuchsia(), params->argument);
      },
      &params);

  UnlockAllocator();
  UnlockThreadRegistry();
}

} // namespace __lsan

@ -122,12 +122,9 @@ void HandleLeaks() {

static int LockStuffAndStopTheWorldCallback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  LockThreadRegistry();
  LockAllocator();
  ScopedStopTheWorldLock lock;
  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
  StopTheWorld(param->callback, param->argument);
  UnlockAllocator();
  UnlockThreadRegistry();
  return 1;
}


@ -143,16 +143,16 @@ void ProcessGlobalRegions(Frontier *frontier) {
}

void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  unsigned depth = 1;
  vm_size_t size = 0;
  vm_address_t address = 0;
  kern_return_t err = KERN_SUCCESS;
  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;

  InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();

  while (err == KERN_SUCCESS) {
    vm_size_t size = 0;
    unsigned depth = 1;
    struct vm_region_submap_info_64 info;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
    err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
                               (vm_region_info_t)&info, &count);

@ -195,11 +195,8 @@ void HandleLeaks() {}

void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument) {
  LockThreadRegistry();
  LockAllocator();
  ScopedStopTheWorldLock lock;
  StopTheWorld(callback, argument);
  UnlockAllocator();
  UnlockThreadRegistry();
}

} // namespace __lsan

@ -62,7 +62,7 @@ void InitializeMainThread() {
  OnCreatedArgs args;
  __sanitizer::GetThreadStackTopAndBottom(true, &args.stack_end,
                                          &args.stack_begin);
  u32 tid = ThreadCreate(0, GetThreadSelf(), true, &args);
  u32 tid = ThreadCreate(kMainTid, true, &args);
  CHECK_EQ(tid, 0);
  ThreadStart(tid);
}
@ -76,6 +76,13 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
                                       caches);
}

// On Fuchsia, leak detection is done by a special hook after atexit hooks.
// So this doesn't install any atexit hook like on other platforms.
void InstallAtExitCheckLeaks() {}

// ASan defines this to check its `halt_on_error` flag.
bool UseExitcodeOnLeak() { return true; }

} // namespace __lsan

// These are declared (in extern "C") by <zircon/sanitizer.h>.
@ -86,14 +93,13 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
                                            const char *name, void *stack_base,
                                            size_t stack_size) {
  uptr user_id = reinterpret_cast<uptr>(thread);
  ENSURE_LSAN_INITED;
  EnsureMainThreadIDIsCorrect();
  OnCreatedArgs args;
  args.stack_begin = reinterpret_cast<uptr>(stack_base);
  args.stack_end = args.stack_begin + stack_size;
  u32 parent_tid = GetCurrentThread();
  u32 tid = ThreadCreate(parent_tid, user_id, detached, &args);
  u32 tid = ThreadCreate(parent_tid, detached, &args);
  return reinterpret_cast<void *>(static_cast<uptr>(tid));
}


@ -468,8 +468,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
    res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
  }
  if (res == 0) {
    int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
                           IsStateDetached(detached));
    int tid = ThreadCreate(GetCurrentThread(), IsStateDetached(detached));
    CHECK_NE(tid, kMainTid);
    atomic_store(&p.tid, tid, memory_order_release);
    while (atomic_load(&p.tid, memory_order_acquire) != 0)
@ -480,23 +479,11 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
  return res;
}

INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  ENSURE_LSAN_INITED;
  int tid = ThreadTid((uptr)th);
  int res = REAL(pthread_join)(th, ret);
  if (res == 0)
    ThreadJoin(tid);
  return res;
INTERCEPTOR(int, pthread_join, void *t, void **arg) {
  return REAL(pthread_join)(t, arg);
}

INTERCEPTOR(int, pthread_detach, void *th) {
  ENSURE_LSAN_INITED;
  int tid = ThreadTid((uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0)
    ThreadDetach(tid);
  return res;
}
DEFINE_REAL_PTHREAD_FUNCTIONS

INTERCEPTOR(void, _exit, int status) {
  if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
@ -530,7 +517,6 @@ void InitializeInterceptors() {
  LSAN_MAYBE_INTERCEPT_MALLINFO;
  LSAN_MAYBE_INTERCEPT_MALLOPT;
  INTERCEPT_FUNCTION(pthread_create);
  INTERCEPT_FUNCTION(pthread_detach);
  INTERCEPT_FUNCTION(pthread_join);
  INTERCEPT_FUNCTION(_exit);


@ -68,7 +68,7 @@ typedef struct {
ALWAYS_INLINE
void lsan_register_worker_thread(int parent_tid) {
  if (GetCurrentThread() == kInvalidTid) {
    u32 tid = ThreadCreate(parent_tid, 0, true);
    u32 tid = ThreadCreate(parent_tid, true);
    ThreadStart(tid, GetTid());
    SetCurrentThread(tid);
  }

@ -75,7 +75,7 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
}

void InitializeMainThread() {
  u32 tid = ThreadCreate(kMainTid, 0, true);
  u32 tid = ThreadCreate(kMainTid, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(tid, GetTid());
}
@ -91,6 +91,11 @@ void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
              nullptr);
}

void InstallAtExitCheckLeaks() {
  if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
    Atexit(DoLeakCheck);
}

} // namespace __lsan

#endif // SANITIZER_POSIX

@ -44,8 +44,8 @@ void ThreadContextLsanBase::OnFinished() {
  DTLS_Destroy();
}

u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached, void *arg) {
  return thread_registry->CreateThread(user_id, detached, parent_tid, arg);
u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {
  return thread_registry->CreateThread(0, detached, parent_tid, arg);
}

void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id,
@ -68,28 +68,6 @@ ThreadContext *CurrentThreadContext() {
  return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    return true;
  }
  return false;
}

u32 ThreadTid(uptr uid) {
  return thread_registry->FindThread(FindThreadByUid, (void *)uid);
}

void ThreadDetach(u32 tid) {
  CHECK_NE(tid, kInvalidTid);
  thread_registry->DetachThread(tid, /* arg */ nullptr);
}

void ThreadJoin(u32 tid) {
  CHECK_NE(tid, kInvalidTid);
  thread_registry->JoinThread(tid, /* arg */ nullptr);
}

void EnsureMainThreadIDIsCorrect() {
  if (GetCurrentThread() == kMainTid)
    CurrentThreadContext()->os_id = GetTid();

@ -45,11 +45,8 @@ class ThreadContext;
void InitializeThreadRegistry();
void InitializeMainThread();

u32 ThreadCreate(u32 tid, uptr uid, bool detached, void *arg = nullptr);
u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr);
void ThreadFinish();
void ThreadDetach(u32 tid);
void ThreadJoin(u32 tid);
u32 ThreadTid(uptr uid);

u32 GetCurrentThread();
void SetCurrentThread(u32 tid);

@ -201,7 +201,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
}

template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
void AddrHashMap<T, kSize>::acquire(Handle *h)
    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];
@ -330,7 +331,8 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
}

template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
void AddrHashMap<T, kSize>::release(Handle *h)
    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;

@ -17,6 +17,7 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

@ -24,66 +25,6 @@ namespace __sanitizer {
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error) return nullptr;
  return p;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // __aligned_malloc, but the allocated blocks can't be passed to free,
  // they need to be passed to __aligned_free. InternalAlloc interface does
  // not account for such requirement. Alignment does not seem to be used
  // anywhere in runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}

InternalAllocator *internal_allocator() {
  return 0;
}

#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
@ -135,8 +76,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  internal_allocator()->Deallocate(cache, ptr);
}

#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@ -187,6 +126,16 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
  RawInternalFree(addr, cache);
}

void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator_cache_mu.Lock();
  internal_allocator()->ForceLock();
}

void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator()->ForceUnlock();
  internal_allocator_cache_mu.Unlock();
}

// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
@ -247,4 +196,14 @@ void PrintHintAllocatorCannotReturnNull() {
         "allocator_may_return_null=1\n");
}

static atomic_uint8_t rss_limit_exceeded;

bool IsRssLimitExceeded() {
  return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
}

void SetRssLimitExceeded(bool limit_exceeded) {
  atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
}

} // namespace __sanitizer

@ -70,6 +70,9 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"

bool IsRssLimitExceeded();
void SetRssLimitExceeded(bool limit_exceeded);

} // namespace __sanitizer

#endif // SANITIZER_ALLOCATOR_H
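// Editor's note on how the two sides of this change meet (restating the patch,
// names as introduced above): the background monitor thread flips the flag
// when the soft RSS limit is crossed, and the allocation fast path polls it,
// so the hot-path cost is a single relaxed atomic byte load:
//
//   SetRssLimitExceeded(current_rss_mb > soft_rss_limit_mb);  // monitor thread
//
//   if (UNLIKELY(IsRssLimitExceeded())) {                     // allocator
//     if (AllocatorMayReturnNull())
//       return nullptr;
//     ReportRssLimitExceeded(stack);
//   }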

@ -175,12 +175,12 @@ class CombinedAllocator {

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

@ -48,6 +48,8 @@ void *InternalReallocArray(void *p, uptr count, uptr size,
void *InternalCalloc(uptr count, uptr size,
                     InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
void InternalAllocatorLock();
void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();

} // namespace __sanitizer

@ -238,13 +238,13 @@ class SizeClassAllocator32 {

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }

@ -354,13 +354,13 @@ class SizeClassAllocator64 {

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }

@ -267,9 +267,9 @@ class LargeMmapAllocator {

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }

  void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.

@ -74,13 +74,12 @@ template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev;
  prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv) return true;
  *cmp = prev;
  return false;
  // Transitioned from __sync_val_compare_and_swap to support targets like
  // SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange
  // can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best
  // match the __sync builtin memory order.
  return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
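// Editor's demonstration of the builtin's semantics (standalone, not part of
// the runtime). On failure, __atomic_compare_exchange writes the observed
// value back into `expected`, which is exactly what callers of this wrapper
// rely on when they retry:
#include <cstdio>

int main() {
  int value = 1, expected = 5, desired = 2;
  bool ok = __atomic_compare_exchange(&value, &expected, &desired, false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  // First attempt fails: ok == false and expected has been updated to 1.
  ok = __atomic_compare_exchange(&value, &expected, &desired, false,
                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  printf("ok=%d value=%d\n", ok, value);  // prints: ok=1 value=2
  return 0;
}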

template<typename T>

@ -11,7 +11,6 @@

#include "sanitizer_chained_origin_depot.h"

#include "sanitizer_persistent_allocator.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//

#include "sanitizer_common.h"

#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"

@ -138,13 +140,21 @@ void LoadedModule::set(const char *module_name, uptr base_address,
  set(module_name, base_address);
  arch_ = arch;
  internal_memcpy(uuid_, uuid, sizeof(uuid_));
  uuid_size_ = kModuleUUIDSize;
  instrumented_ = instrumented;
}

void LoadedModule::setUuid(const char *uuid, uptr size) {
  if (size > kModuleUUIDSize)
    size = kModuleUUIDSize;
  internal_memcpy(uuid_, uuid, size);
  uuid_size_ = size;
}

void LoadedModule::clear() {
  InternalFree(full_name_);
  base_address_ = 0;
  max_executable_address_ = 0;
  max_address_ = 0;
  full_name_ = nullptr;
  arch_ = kModuleArchUnknown;
  internal_memset(uuid_, 0, kModuleUUIDSize);
@ -162,8 +172,7 @@ void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
  AddressRange *r =
      new(mem) AddressRange(beg, end, executable, writable, name);
  ranges_.push_back(r);
  if (executable && end > max_executable_address_)
    max_executable_address_ = end;
  max_address_ = Max(max_address_, end);
}

bool LoadedModule::containsAddress(uptr address) const {
@ -301,18 +310,22 @@ struct MallocFreeHook {

static MallocFreeHook MFHooks[kMaxMallocFreeHooks];

void RunMallocHooks(const void *ptr, uptr size) {
void RunMallocHooks(void *ptr, uptr size) {
  __sanitizer_malloc_hook(ptr, size);
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    auto hook = MFHooks[i].malloc_hook;
    if (!hook) return;
    if (!hook)
      break;
    hook(ptr, size);
  }
}

void RunFreeHooks(const void *ptr) {
void RunFreeHooks(void *ptr) {
  __sanitizer_free_hook(ptr);
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    auto hook = MFHooks[i].free_hook;
    if (!hook) return;
    if (!hook)
      break;
    hook(ptr);
  }
}
@ -360,4 +373,16 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
                                              void (*free_hook)(const void *)) {
  return InstallMallocFreeHooks(malloc_hook, free_hook);
}

// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
                             uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}

} // extern "C"
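// Editor's note: user-facing usage of the hook machinery above (a sketch; in
// real code prefer including <sanitizer/allocator_interface.h>, whose pointer
// qualifiers may differ from the internal declaration hand-written here):
#include <cstddef>
#include <cstdio>

extern "C" int __sanitizer_install_malloc_and_free_hooks(
    void (*malloc_hook)(const void *, size_t),
    void (*free_hook)(const void *));

static void OnAlloc(const void *p, size_t n) {
  fprintf(stderr, "alloc %p (%zu bytes)\n", p, n);
}
static void OnFree(const void *p) { fprintf(stderr, "free %p\n", p); }

int main() {
  __sanitizer_install_malloc_and_free_hooks(OnAlloc, OnFree);
  delete[] new char[16];  // both hooks fire when built with a sanitizer
  return 0;
}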

@ -16,7 +16,6 @@
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
@ -171,8 +170,8 @@ void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
void RunMallocHooks(void *ptr, uptr size);
void RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
@ -238,12 +237,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() ACQUIRE(mutex_);
  static void Unlock() RELEASE(mutex_);
  static void CheckLocked() CHECK_LOCKED(mutex_);
  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
@ -286,7 +285,7 @@ void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);
@ -326,12 +325,6 @@ void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
@ -460,6 +453,10 @@ template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
@ -669,11 +666,9 @@ void Sort(T *v, uptr size, Compare comp = {}) {

// Works like std::lower_bound: finds the first element that is not less
// than the val.
template <class Container,
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v,
                        const typename Container::value_type &val,
                        Compare comp = {}) {
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
@ -743,6 +738,9 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
@ -774,7 +772,7 @@ inline const char *ModuleArchToString(ModuleArch arch) {
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kModuleUUIDSize = 32;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
@ -784,8 +782,9 @@ class LoadedModule {
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
@ -793,6 +792,7 @@ class LoadedModule {
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
@ -800,9 +800,10 @@ class LoadedModule {

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
@ -829,8 +830,9 @@ class LoadedModule {
 private:
  char *full_name_; // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
@ -956,7 +958,7 @@ struct SignalContext {
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows to distinguish between these cases

@ -21,7 +21,7 @@
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_FD_ACCESS
// COMMON_INTERCEPTOR_SET_THREAD_NAME
// COMMON_INTERCEPTOR_ON_DLOPEN
// COMMON_INTERCEPTOR_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
@ -132,6 +132,76 @@ extern const short *_toupper_tab_;
extern const short *_tolower_tab_;
#endif

#if SANITIZER_MUSL && \
    (defined(__i386__) || defined(__arm__) || SANITIZER_MIPS32 || SANITIZER_PPC32)
// musl 1.2.0 on existing 32-bit architectures uses new symbol names for the
// time-related functions that take 64-bit time_t values. See
// https://musl.libc.org/time64.html
#define adjtime __adjtime64
#define adjtimex __adjtimex_time64
#define aio_suspend __aio_suspend_time64
#define clock_adjtime __clock_adjtime64
#define clock_getres __clock_getres_time64
#define clock_gettime __clock_gettime64
#define clock_nanosleep __clock_nanosleep_time64
#define clock_settime __clock_settime64
#define cnd_timedwait __cnd_timedwait_time64
#define ctime __ctime64
#define ctime_r __ctime64_r
#define difftime __difftime64
#define dlsym __dlsym_time64
#define fstatat __fstatat_time64
#define fstat __fstat_time64
#define ftime __ftime64
#define futimens __futimens_time64
#define futimesat __futimesat_time64
#define futimes __futimes_time64
#define getitimer __getitimer_time64
#define getrusage __getrusage_time64
#define gettimeofday __gettimeofday_time64
#define gmtime __gmtime64
#define gmtime_r __gmtime64_r
#define localtime __localtime64
#define localtime_r __localtime64_r
#define lstat __lstat_time64
#define lutimes __lutimes_time64
#define mktime __mktime64
#define mq_timedreceive __mq_timedreceive_time64
#define mq_timedsend __mq_timedsend_time64
#define mtx_timedlock __mtx_timedlock_time64
#define nanosleep __nanosleep_time64
#define ppoll __ppoll_time64
#define pselect __pselect_time64
#define pthread_cond_timedwait __pthread_cond_timedwait_time64
#define pthread_mutex_timedlock __pthread_mutex_timedlock_time64
#define pthread_rwlock_timedrdlock __pthread_rwlock_timedrdlock_time64
#define pthread_rwlock_timedwrlock __pthread_rwlock_timedwrlock_time64
#define pthread_timedjoin_np __pthread_timedjoin_np_time64
#define recvmmsg __recvmmsg_time64
#define sched_rr_get_interval __sched_rr_get_interval_time64
#define select __select_time64
#define semtimedop __semtimedop_time64
#define sem_timedwait __sem_timedwait_time64
#define setitimer __setitimer_time64
#define settimeofday __settimeofday_time64
#define sigtimedwait __sigtimedwait_time64
#define stat __stat_time64
#define stime __stime64
#define thrd_sleep __thrd_sleep_time64
#define timegm __timegm_time64
#define timerfd_gettime __timerfd_gettime64
#define timerfd_settime __timerfd_settime64
#define timer_gettime __timer_gettime64
#define timer_settime __timer_settime64
#define timespec_get __timespec_get_time64
#define time __time64
#define utimensat __utimensat_time64
#define utimes __utimes_time64
#define utime __utime64
#define wait3 __wait3_time64
#define wait4 __wait4_time64
#endif
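// Editor's note on the effect of the block above: after preprocessing, an
// interceptor written against the classic name, e.g.
//
//   INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) { ... }
//
// becomes an interceptor for __clock_gettime64, the symbol that 32-bit musl
// 1.2.0 binaries actually call for the 64-bit time_t ABI.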

// Platform-specific options.
#if SANITIZER_MAC
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
@ -206,9 +276,9 @@ extern const short *_tolower_tab_;
  COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
    common_flags()->strict_string_checks ? (internal_strlen(s)) + 1 : (n) )

#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
  CheckNoDeepBind(filename, flag);
#ifndef COMMON_INTERCEPTOR_DLOPEN
#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
  ({ CheckNoDeepBind(filename, flag); REAL(dlopen)(filename, flag); })
#endif
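// Editor's note: a tool can pre-define the macro before including this file to
// control how the real dlopen is reached; hypothetical override (the check
// function name is illustrative, not an existing helper):
//
//   #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
//     ({ MyToolPreDlopenCheck(filename, flag); REAL(dlopen)(filename, flag); })
//
// Folding the REAL() call into the macro, instead of the old ON_DLOPEN hook
// that only ran before it, lets a platform substitute the loading call itself.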

#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
@ -1295,12 +1365,16 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
  static const int PR_SET_NAME = 15;
  static const int PR_SCHED_CORE = 62;
  static const int PR_SCHED_CORE_GET = 0;
  int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
  if (option == PR_SET_NAME) {
    char buff[16];
    internal_strncpy(buff, (char *)arg2, 15);
    buff[15] = 0;
    COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
  } else if (res != -1 && option == PR_SCHED_CORE && arg2 == PR_SCHED_CORE_GET) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64*)(arg5), sizeof(u64));
  }
  return res;
}
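// Editor's note: the user-space call modeled by the new PR_SCHED_CORE branch
// looks roughly like this (Linux >= 5.14; constants from <linux/prctl.h>,
// treat as a sketch):
//
//   unsigned long long cookie = 0;
//   prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0 /* current task */,
//         PR_SCHED_CORE_SCOPE_THREAD, (unsigned long)&cookie);
//
// On success the kernel stores the core-scheduling cookie through the fifth
// argument, which is why those sizeof(u64) bytes are marked as written.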
@ -2422,6 +2496,34 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
#define INIT_GLOB64
#endif // SANITIZER_INTERCEPT_GLOB64

#if SANITIZER_INTERCEPT___B64_TO
INTERCEPTOR(int, __b64_ntop, unsigned char const *src, SIZE_T srclength,
            char *target, SIZE_T targsize) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, __b64_ntop, src, srclength, target, targsize);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, src, srclength);
  int res = REAL(__b64_ntop)(src, srclength, target, targsize);
  if (res >= 0)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res + 1);
  return res;
}
INTERCEPTOR(int, __b64_pton, char const *src, char *target, SIZE_T targsize) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, __b64_pton, src, target, targsize);
  COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
  int res = REAL(__b64_pton)(src, target, targsize);
  if (res >= 0)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res);
  return res;
}
# define INIT___B64_TO \
  COMMON_INTERCEPT_FUNCTION(__b64_ntop); \
  COMMON_INTERCEPT_FUNCTION(__b64_pton);
#else // SANITIZER_INTERCEPT___B64_TO
#define INIT___B64_TO
#endif // SANITIZER_INTERCEPT___B64_TO
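// Editor's note: for reference, the intercepted resolver helpers classically
// have this shape (BSD libresolv; recalled signatures, treat as a sketch):
//
//   int __b64_ntop(const unsigned char *src, size_t srclength,
//                  char *target, size_t targsize);  // chars written, or -1
//   int __b64_pton(const char *src, unsigned char *target, size_t targsize);
//
// Both return -1 on failure, which is why the WRITE_RANGE annotations above
// are guarded by res >= 0.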


#if SANITIZER_INTERCEPT_POSIX_SPAWN

template <class RealSpawnPtr>
@ -6380,8 +6482,7 @@ INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
  if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
  COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
  void *res = REAL(dlopen)(filename, flag);
  void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag);
  Symbolizer::GetOrInit()->InvalidateModuleList();
  COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
  return res;
@ -6872,6 +6973,23 @@ INTERCEPTOR(int, stat, const char *path, void *buf) {
#define INIT_STAT
#endif

#if SANITIZER_INTERCEPT_STAT64
INTERCEPTOR(int, stat64, const char *path, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, stat64, path, buf);
  if (common_flags()->intercept_stat)
    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
  int res = REAL(stat64)(path, buf);
  if (!res)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
  return res;
}
#define INIT_STAT64 COMMON_INTERCEPT_FUNCTION(stat64)
#else
#define INIT_STAT64
#endif


#if SANITIZER_INTERCEPT_LSTAT
INTERCEPTOR(int, lstat, const char *path, void *buf) {
  void *ctx;
@ -6888,6 +7006,22 @@ INTERCEPTOR(int, lstat, const char *path, void *buf) {
#define INIT_LSTAT
#endif

#if SANITIZER_INTERCEPT_STAT64
INTERCEPTOR(int, lstat64, const char *path, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, lstat64, path, buf);
  if (common_flags()->intercept_stat)
    COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
  int res = REAL(lstat64)(path, buf);
  if (!res)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
  return res;
}
#define INIT_LSTAT64 COMMON_INTERCEPT_FUNCTION(lstat64)
#else
#define INIT_LSTAT64
#endif

#if SANITIZER_INTERCEPT___XSTAT
INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
  void *ctx;
@ -7858,12 +7992,12 @@ INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) {
  unpoison_file(stream);
}

INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, int mode) {
INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, SIZE_T size) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, mode);
  REAL(setbuffer)(stream, buf, mode);
  COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, size);
  REAL(setbuffer)(stream, buf, size);
  if (buf) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
  }
  if (stream)
    unpoison_file(stream);
@ -7905,7 +8039,7 @@ INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) {
  if (pattern)
    COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, internal_strlen(pattern) + 1);
  int res = REAL(regcomp)(preg, pattern, cflags);
  if (!res)
  if (preg)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz);
  return res;
}
@ -10290,6 +10424,7 @@ static void InitializeCommonInterceptors() {
  INIT_TIME;
  INIT_GLOB;
  INIT_GLOB64;
  INIT___B64_TO;
  INIT_POSIX_SPAWN;
  INIT_WAIT;
  INIT_WAIT4;
@ -10447,8 +10582,10 @@ static void InitializeCommonInterceptors() {
  INIT_RECV_RECVFROM;
  INIT_SEND_SENDTO;
  INIT_STAT;
  INIT_STAT64;
  INIT_EVENTFD_READ_WRITE;
  INIT_LSTAT;
  INIT_LSTAT64;
  INIT___XSTAT;
  INIT___XSTAT64;
  INIT___LXSTAT;

@ -115,11 +115,19 @@ static void ioctl_table_fill() {
  // _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
  _(BLKFLSBUF, NONE, 0);
  _(BLKGETSIZE, WRITE, sizeof(uptr));
  _(BLKRAGET, WRITE, sizeof(int));
  _(BLKRAGET, WRITE, sizeof(uptr));
  _(BLKRASET, NONE, 0);
  _(BLKROGET, WRITE, sizeof(int));
  _(BLKROSET, READ, sizeof(int));
  _(BLKRRPART, NONE, 0);
  _(BLKFRASET, NONE, 0);
  _(BLKFRAGET, WRITE, sizeof(uptr));
  _(BLKSECTSET, READ, sizeof(short));
  _(BLKSECTGET, WRITE, sizeof(short));
  _(BLKSSZGET, WRITE, sizeof(int));
  _(BLKBSZGET, WRITE, sizeof(int));
  _(BLKBSZSET, READ, sizeof(uptr));
  _(BLKGETSIZE64, WRITE, sizeof(u64));
  _(CDROMEJECT, NONE, 0);
  _(CDROMEJECT_SW, NONE, 0);
  _(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);

@ -11,3 +11,5 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames)

@ -10,25 +10,22 @@
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_procmaps.h"

#include "sanitizer_stackdepot.h"

namespace __sanitizer {

static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
  CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
  SoftRssLimitExceededCallback = Callback;
}

#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }

void *BackgroundThread(void *arg) {
  VPrintf(1, "%s: Started BackgroundThread\n", SanitizerToolName);
  const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
  const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
  const bool heap_profile = common_flags()->heap_profile;
@ -66,13 +63,11 @@ void *BackgroundThread(void *arg) {
        reached_soft_rss_limit = true;
        Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(true);
        SetRssLimitExceeded(true);
      } else if (soft_rss_limit_mb >= current_rss_mb &&
                 reached_soft_rss_limit) {
        reached_soft_rss_limit = false;
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(false);
        SetRssLimitExceeded(false);
      }
    }
    if (heap_profile &&
@ -83,6 +78,42 @@ void *BackgroundThread(void *arg) {
    }
  }
}

void MaybeStartBackgroudThread() {
  // Need to implement/test on other platforms.
  // Start the background thread if one of the rss limits is given.
  if (!common_flags()->hard_rss_limit_mb &&
      !common_flags()->soft_rss_limit_mb &&
      !common_flags()->heap_profile) return;
  if (!&real_pthread_create) {
    VPrintf(1, "%s: real_pthread_create undefined\n", SanitizerToolName);
    return; // Can't spawn the thread anyway.
  }

  static bool started = false;
  if (!started) {
    started = true;
    internal_start_thread(BackgroundThread, nullptr);
  }
}

# if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
# ifdef __clang__
# pragma clang diagnostic push
// We avoid global constructors to be sure that globals are ready when
// sanitizers need them. This can happen before global constructors have
// executed. Here we don't mind if the thread is started at a later stage.
# pragma clang diagnostic ignored "-Wglobal-constructors"
# endif
static struct BackgroudThreadStarted {
BackgroudThreadStarted() { MaybeStartBackgroudThread(); }
} background_thread_strarter UNUSED;
# ifdef __clang__
# pragma clang diagnostic pop
# endif
# endif
#else
void MaybeStartBackgroudThread() {}
#endif

void WriteToSyslog(const char *msg) {
@ -105,18 +136,6 @@ void WriteToSyslog(const char *msg) {
WriteOneLineToSyslog(p);
}

void MaybeStartBackgroudThread() {
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
!SANITIZER_GO // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb &&
!common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif
}

static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
@ -185,10 +204,22 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,

#endif // !SANITIZER_FUCHSIA

#if !SANITIZER_WINDOWS && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {}
static void StopStackDepotBackgroundThread() {
StackDepotStopBackgroundThread();
}
#else
// SANITIZER_WEAK_ATTRIBUTE is unsupported.
static void StopStackDepotBackgroundThread() {}
#endif

} // namespace __sanitizer

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
__sanitizer_sandbox_arguments *args) {
__sanitizer::StopStackDepotBackgroundThread();
__sanitizer::PlatformPrepareForSandboxing(args);
if (__sanitizer::sandboxing_callback)
__sanitizer::sandboxing_callback();
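For context, a sketch of how a tool can consume the soft-RSS notifications wired up above. The handler name and body here are hypothetical (ASan registers its own handler during initialization); only SetSoftRssLimitExceededCallback itself comes from the sources:

#include "sanitizer_common/sanitizer_common.h"

// Hypothetical tool-side handler: log transitions across the soft RSS limit.
static void OnSoftRssLimit(bool exceeded) {
  __sanitizer::Report("soft RSS limit %s\n", exceeded ? "exceeded" : "cleared");
}

void ToolInit() {
  // Register exactly once; SetSoftRssLimitExceededCallback CHECKs that no
  // callback was installed before.
  __sanitizer::SetSoftRssLimitExceededCallback(OnSoftRssLimit);
}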

@ -33,6 +33,7 @@

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer_fuchsia.h"

@ -10,11 +10,13 @@
#include "sanitizer_platform.h"

#if !SANITIZER_FUCHSIA
#include "sancov_flags.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_file.h"
# include "sancov_flags.h"
# include "sanitizer_allocator_internal.h"
# include "sanitizer_atomic.h"
# include "sanitizer_common.h"
# include "sanitizer_common/sanitizer_stacktrace.h"
# include "sanitizer_file.h"
# include "sanitizer_interface_internal.h"

using namespace __sanitizer;

@ -72,7 +74,7 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
const uptr pc = pcs[i];
if (!pc) continue;

if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
if (!GetModuleAndOffsetForPc(pc, nullptr, 0, &pcs[i])) {
Printf("ERROR: unknown pc 0x%zx (may happen if dlclose is used)\n", pc);
continue;
}
@ -87,8 +89,7 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
last_base = module_base;
module_start_idx = i;
module_found = true;
__sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
&pcs[i]);
GetModuleAndOffsetForPc(pc, module_name, kMaxPathLength, &pcs[i]);
}
}

@ -222,7 +223,8 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(const uptr* pcs,

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {
if (!*guard) return;
__sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
__sancov::pc_guard_controller.TracePcGuard(
guard, StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
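The TracePcGuard change above replaces the blanket "pc - 1" with StackTrace::GetPreviousInstructionPc(), which knows the instruction width of the target. A rough sketch of the idea (illustrative only; the real helper lives in sanitizer_stacktrace.h and handles more targets):

// Map a return address to a PC that falls inside the call instruction.
using uptr = unsigned long;  // stand-in for __sanitizer::uptr

uptr PreviousInstructionPcSketch(uptr pc) {
#if defined(__sparc__) || defined(__mips__)
  return pc - 8;  // branch delay slot: the call is two instructions back
#elif defined(__aarch64__) || defined(__powerpc__)
  return pc - 4;  // fixed 4-byte instruction width
#else
  return pc - 1;  // variable-width (x86): one byte back lands inside the call
#endif
}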

705
libsanitizer/sanitizer_common/sanitizer_dense_map.h
Normal file
@ -0,0 +1,705 @@
//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a fork of the llvm/ADT/DenseMap.h class with the following changes:
// * Use mmap to allocate.
// * No iterators.
// * Does not shrink.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DENSE_MAP_H
#define SANITIZER_DENSE_MAP_H

#include "sanitizer_common.h"
#include "sanitizer_dense_map_info.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"

namespace __sanitizer {

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
typename BucketT>
class DenseMapBase {
public:
using size_type = unsigned;
using key_type = KeyT;
using mapped_type = ValueT;
using value_type = BucketT;

WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
unsigned size() const { return getNumEntries(); }

/// Grow the densemap so that it can contain at least \p NumEntries items
/// before resizing again.
void reserve(size_type NumEntries) {
auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
if (NumBuckets > getNumBuckets())
grow(NumBuckets);
}

void clear() {
if (getNumEntries() == 0 && getNumTombstones() == 0)
return;

const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
if (__sanitizer::is_trivially_destructible<ValueT>::value) {
// Use a simpler loop when values don't need destruction.
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
P->getFirst() = EmptyKey;
} else {
unsigned NumEntries = getNumEntries();
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
P->getSecond().~ValueT();
--NumEntries;
}
P->getFirst() = EmptyKey;
}
}
CHECK_EQ(NumEntries, 0);
}
setNumEntries(0);
setNumTombstones(0);
}

/// Return 1 if the specified key is in the map, 0 otherwise.
size_type count(const KeyT &Key) const {
const BucketT *TheBucket;
return LookupBucketFor(Key, TheBucket) ? 1 : 0;
}

value_type *find(const KeyT &Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}
const value_type *find(const KeyT &Key) const {
const BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}

/// Alternate version of find() which allows a different, and possibly
/// less expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
/// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
/// type used.
template <class LookupKeyT>
value_type *find_as(const LookupKeyT &Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}
template <class LookupKeyT>
const value_type *find_as(const LookupKeyT &Key) const {
const BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket;
return nullptr;
}

/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Key) const {
const BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return TheBucket->getSecond();
return ValueT();
}

// Inserts key,value pair into the map if the key isn't already in the map.
// If the key is already in the map, it returns false and doesn't update the
// value.
detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
return try_emplace(KV.first, KV.second);
}

// Inserts key,value pair into the map if the key isn't already in the map.
// If the key is already in the map, it returns false and doesn't update the
// value.
detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
return try_emplace(__sanitizer::move(KV.first),
__sanitizer::move(KV.second));
}

// Inserts key,value pair into the map if the key isn't already in the map.
// The value is constructed in-place if the key is not in the map, otherwise
// it is not moved.
template <typename... Ts>
detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
Ts &&...Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return {TheBucket, false}; // Already in map.

// Otherwise, insert the new element.
TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
__sanitizer::forward<Ts>(Args)...);
return {TheBucket, true};
}

// Inserts key,value pair into the map if the key isn't already in the map.
// The value is constructed in-place if the key is not in the map, otherwise
// it is not moved.
template <typename... Ts>
detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
Ts &&...Args) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return {TheBucket, false}; // Already in map.

// Otherwise, insert the new element.
TheBucket =
InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
return {TheBucket, true};
}

/// Alternate version of insert() which allows a different, and possibly
/// less expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
/// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
/// type used.
template <typename LookupKeyT>
detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
return {TheBucket, false}; // Already in map.

// Otherwise, insert the new element.
TheBucket =
InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
__sanitizer::move(KV.second), Val);
return {TheBucket, true};
}

bool erase(const KeyT &Val) {
BucketT *TheBucket;
if (!LookupBucketFor(Val, TheBucket))
return false; // not in map.

TheBucket->getSecond().~ValueT();
TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
return true;
}

void erase(value_type *I) {
CHECK_NE(I, nullptr);
BucketT *TheBucket = &*I;
TheBucket->getSecond().~ValueT();
TheBucket->getFirst() = getTombstoneKey();
decrementNumEntries();
incrementNumTombstones();
}

value_type &FindAndConstruct(const KeyT &Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;

return *InsertIntoBucket(TheBucket, Key);
}

ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }

value_type &FindAndConstruct(KeyT &&Key) {
BucketT *TheBucket;
if (LookupBucketFor(Key, TheBucket))
return *TheBucket;

return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
}

ValueT &operator[](KeyT &&Key) {
return FindAndConstruct(__sanitizer::move(Key)).second;
}

/// Iterate over active entries of the container.
///
/// Function can return false to stop the process early.
template <class Fn>
void forEach(Fn fn) {
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
const KeyT K = P->getFirst();
if (!KeyInfoT::isEqual(K, EmptyKey) &&
!KeyInfoT::isEqual(K, TombstoneKey)) {
if (!fn(*P))
return;
}
}
}

template <class Fn>
void forEach(Fn fn) const {
const_cast<DenseMapBase *>(this)->forEach(
[&](const value_type &KV) { return fn(KV); });
}

protected:
DenseMapBase() = default;

void destroyAll() {
if (getNumBuckets() == 0) // Nothing to do.
return;

const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
P->getSecond().~ValueT();
P->getFirst().~KeyT();
}
}

void initEmpty() {
setNumEntries(0);
setNumTombstones(0);

CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
const KeyT EmptyKey = getEmptyKey();
for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
::new (&B->getFirst()) KeyT(EmptyKey);
}

/// Returns the number of buckets to allocate to ensure that the DenseMap can
/// accommodate \p NumEntries without need to grow().
unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
// Ensure that "NumEntries * 4 < NumBuckets * 3"
if (NumEntries == 0)
return 0;
// +1 is required because of the strict inequality.
// For example, if NumEntries is 48 we need at least 65 buckets, which then
// rounds up to the next power of two (128).
return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
}

void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
initEmpty();

// Insert all the old elements.
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
!KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
(void)FoundVal; // silence warning.
CHECK(!FoundVal);
DestBucket->getFirst() = __sanitizer::move(B->getFirst());
::new (&DestBucket->getSecond())
ValueT(__sanitizer::move(B->getSecond()));
incrementNumEntries();

// Free the value.
B->getSecond().~ValueT();
}
B->getFirst().~KeyT();
}
}

template <typename OtherBaseT>
void copyFrom(
const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
CHECK_NE(&other, this);
CHECK_EQ(getNumBuckets(), other.getNumBuckets());

setNumEntries(other.getNumEntries());
setNumTombstones(other.getNumTombstones());

if (__sanitizer::is_trivially_copyable<KeyT>::value &&
__sanitizer::is_trivially_copyable<ValueT>::value)
internal_memcpy(reinterpret_cast<void *>(getBuckets()),
other.getBuckets(), getNumBuckets() * sizeof(BucketT));
else
for (uptr i = 0; i < getNumBuckets(); ++i) {
::new (&getBuckets()[i].getFirst())
KeyT(other.getBuckets()[i].getFirst());
if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
::new (&getBuckets()[i].getSecond())
ValueT(other.getBuckets()[i].getSecond());
}
}

static unsigned getHashValue(const KeyT &Val) {
return KeyInfoT::getHashValue(Val);
}

template <typename LookupKeyT>
static unsigned getHashValue(const LookupKeyT &Val) {
return KeyInfoT::getHashValue(Val);
}

static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }

static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }

private:
unsigned getNumEntries() const {
return static_cast<const DerivedT *>(this)->getNumEntries();
}

void setNumEntries(unsigned Num) {
static_cast<DerivedT *>(this)->setNumEntries(Num);
}

void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

unsigned getNumTombstones() const {
return static_cast<const DerivedT *>(this)->getNumTombstones();
}

void setNumTombstones(unsigned Num) {
static_cast<DerivedT *>(this)->setNumTombstones(Num);
}

void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

const BucketT *getBuckets() const {
return static_cast<const DerivedT *>(this)->getBuckets();
}

BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }

unsigned getNumBuckets() const {
return static_cast<const DerivedT *>(this)->getNumBuckets();
}

BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

const BucketT *getBucketsEnd() const {
return getBuckets() + getNumBuckets();
}

void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }

template <typename KeyArg, typename... ValueArgs>
BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
ValueArgs &&...Values) {
TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
::new (&TheBucket->getSecond())
ValueT(__sanitizer::forward<ValueArgs>(Values)...);
return TheBucket;
}

template <typename LookupKeyT>
BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
ValueT &&Value, LookupKeyT &Lookup) {
TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

TheBucket->getFirst() = __sanitizer::move(Key);
::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
return TheBucket;
}

template <typename LookupKeyT>
BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
BucketT *TheBucket) {
// If the load of the hash table is more than 3/4, or if fewer than 1/8 of
// the buckets are empty (meaning that many are filled with tombstones),
// grow the table.
//
// The latter case is tricky. For example, if we had one empty bucket with
// tons of tombstones, failing lookups (e.g. for insertion) would have to
// probe almost the entire table until it found the empty bucket. If the
// table is completely filled with tombstones, no lookup would ever succeed,
// causing infinite loops in lookup.
unsigned NewNumEntries = getNumEntries() + 1;
unsigned NumBuckets = getNumBuckets();
if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
this->grow(NumBuckets * 2);
LookupBucketFor(Lookup, TheBucket);
NumBuckets = getNumBuckets();
} else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
NumBuckets / 8)) {
this->grow(NumBuckets);
LookupBucketFor(Lookup, TheBucket);
}
CHECK(TheBucket);

// Only update the state after we've grown our bucket space appropriately
// so that when growing buckets we have self-consistent entry count.
incrementNumEntries();

// If we are writing over a tombstone, remember this.
const KeyT EmptyKey = getEmptyKey();
if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
decrementNumTombstones();

return TheBucket;
}

/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
/// FoundBucket. If the bucket contains the key and a value, this returns
/// true, otherwise it returns a bucket with an empty marker or tombstone and
/// returns false.
template <typename LookupKeyT>
bool LookupBucketFor(const LookupKeyT &Val,
const BucketT *&FoundBucket) const {
const BucketT *BucketsPtr = getBuckets();
const unsigned NumBuckets = getNumBuckets();

if (NumBuckets == 0) {
FoundBucket = nullptr;
return false;
}

// FoundTombstone - Keep track of whether we find a tombstone while probing.
const BucketT *FoundTombstone = nullptr;
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));

unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
unsigned ProbeAmt = 1;
while (true) {
const BucketT *ThisBucket = BucketsPtr + BucketNo;
// Found Val's bucket? If so, return it.
if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
FoundBucket = ThisBucket;
return true;
}

// If we found an empty bucket, the key doesn't exist in the set.
// Insert it and return the default value.
if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
// If we've already seen a tombstone while probing, fill it in instead
// of the empty bucket we eventually probed to.
FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
return false;
}

// If this is a tombstone, remember it. If Val ends up not in the map, we
// prefer to return it than something that would require more probing.
if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
!FoundTombstone)
FoundTombstone = ThisBucket; // Remember the first tombstone found.

// Otherwise, it's a hash collision or a tombstone, continue quadratic
// probing.
BucketNo += ProbeAmt++;
BucketNo &= (NumBuckets - 1);
}
}

template <typename LookupKeyT>
bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
const BucketT *ConstFoundBucket;
bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
Val, ConstFoundBucket);
FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
return Result;
}

public:
/// Return the approximate size (in bytes) of the actual map.
/// This is just the raw memory used by DenseMap.
/// If entries are pointers to objects, the sizes of the referenced objects
/// are not included.
uptr getMemorySize() const {
return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
}
};

/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
typename BucketT>
bool operator==(
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
if (LHS.size() != RHS.size())
return false;

bool R = true;
LHS.forEach(
[&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
BucketT>::value_type &KV) -> bool {
const auto *I = RHS.find(KV.first);
if (!I || I->second != KV.second) {
R = false;
return false;
}
return true;
});

return R;
}

/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
typename BucketT>
bool operator!=(
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
return !(LHS == RHS);
}

template <typename KeyT, typename ValueT,
typename KeyInfoT = DenseMapInfo<KeyT>,
typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
KeyT, ValueT, KeyInfoT, BucketT> {
friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

// Lift some types from the dependent base class into this class for
// simplicity of referring to them.
using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

BucketT *Buckets = nullptr;
unsigned NumEntries = 0;
unsigned NumTombstones = 0;
unsigned NumBuckets = 0;

public:
/// Create a DenseMap with an optional \p InitialReserve that guarantees that
/// this number of elements can be inserted into the map without grow().
explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
constexpr DenseMap() = default;

DenseMap(const DenseMap &other) : BaseT() {
init(0);
copyFrom(other);
}

DenseMap(DenseMap &&other) : BaseT() {
init(0);
swap(other);
}

~DenseMap() {
this->destroyAll();
deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
}

void swap(DenseMap &RHS) {
Swap(Buckets, RHS.Buckets);
Swap(NumEntries, RHS.NumEntries);
Swap(NumTombstones, RHS.NumTombstones);
Swap(NumBuckets, RHS.NumBuckets);
}

DenseMap &operator=(const DenseMap &other) {
if (&other != this)
copyFrom(other);
return *this;
}

DenseMap &operator=(DenseMap &&other) {
this->destroyAll();
deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
init(0);
swap(other);
return *this;
}

void copyFrom(const DenseMap &other) {
this->destroyAll();
deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
if (allocateBuckets(other.NumBuckets)) {
this->BaseT::copyFrom(other);
} else {
NumEntries = 0;
NumTombstones = 0;
}
}

void init(unsigned InitNumEntries) {
auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
if (allocateBuckets(InitBuckets)) {
this->BaseT::initEmpty();
} else {
NumEntries = 0;
NumTombstones = 0;
}
}

void grow(unsigned AtLeast) {
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;

allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
CHECK(Buckets);
if (!OldBuckets) {
this->BaseT::initEmpty();
return;
}

this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);

// Free the old table.
deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
}

private:
unsigned getNumEntries() const { return NumEntries; }

void setNumEntries(unsigned Num) { NumEntries = Num; }

unsigned getNumTombstones() const { return NumTombstones; }

void setNumTombstones(unsigned Num) { NumTombstones = Num; }

BucketT *getBuckets() const { return Buckets; }

unsigned getNumBuckets() const { return NumBuckets; }

bool allocateBuckets(unsigned Num) {
NumBuckets = Num;
if (NumBuckets == 0) {
Buckets = nullptr;
return false;
}

uptr Size = sizeof(BucketT) * NumBuckets;
if (Size * 2 <= GetPageSizeCached()) {
// We always allocate at least a page, so use the entire space.
unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
Size <<= Log2;
NumBuckets <<= Log2;
CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
CHECK_GT(Size * 2, GetPageSizeCached());
}
Buckets = static_cast<BucketT *>(allocate_buffer(Size));
return true;
}

static void *allocate_buffer(uptr Size) {
return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
}

static void deallocate_buffer(void *Ptr, uptr Size) {
UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
}
};

} // namespace __sanitizer

#endif // SANITIZER_DENSE_MAP_H
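A minimal usage sketch of the container defined above (the function name is illustrative, and the include path assumes a compiler-rt-style layout; storage comes from MmapOrDie, so this is for runtime-internal use only):

#include "sanitizer_common/sanitizer_dense_map.h"

using __sanitizer::DenseMap;

void DenseMapExample() {
  DenseMap<int, int> counts;            // no allocation until the first grow()
  counts[42] = 1;                       // FindAndConstruct, then assignment
  auto res = counts.try_emplace(7, 0);  // res.second is true on first insert
  if (res.second)
    res.first->second = 10;             // res.first points at the bucket
  if (auto *kv = counts.find(42))       // find() returns a bucket pointer or null
    kv->second++;
  counts.erase(7);                      // leaves a tombstone; never shrinks
}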

282
libsanitizer/sanitizer_common/sanitizer_dense_map_info.h
Normal file
@ -0,0 +1,282 @@
//===- sanitizer_dense_map_info.h - Type traits for DenseMap ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DENSE_MAP_INFO_H
#define SANITIZER_DENSE_MAP_INFO_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"

namespace __sanitizer {

namespace detail {

/// Simplistic combination of 32-bit hash values into 32-bit hash values.
static constexpr unsigned combineHashValue(unsigned a, unsigned b) {
u64 key = (u64)a << 32 | (u64)b;
key += ~(key << 32);
key ^= (key >> 22);
key += ~(key << 13);
key ^= (key >> 8);
key += (key << 3);
key ^= (key >> 15);
key += ~(key << 27);
key ^= (key >> 31);
return (unsigned)key;
}

// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair {
KeyT first = {};
ValueT second = {};
constexpr DenseMapPair() = default;
constexpr DenseMapPair(const KeyT &f, const ValueT &s)
: first(f), second(s) {}

template <typename KeyT2, typename ValueT2>
constexpr DenseMapPair(KeyT2 &&f, ValueT2 &&s)
: first(__sanitizer::forward<KeyT2>(f)),
second(__sanitizer::forward<ValueT2>(s)) {}

constexpr DenseMapPair(const DenseMapPair &other) = default;
constexpr DenseMapPair &operator=(const DenseMapPair &other) = default;
constexpr DenseMapPair(DenseMapPair &&other) = default;
constexpr DenseMapPair &operator=(DenseMapPair &&other) = default;

KeyT &getFirst() { return first; }
const KeyT &getFirst() const { return first; }
ValueT &getSecond() { return second; }
const ValueT &getSecond() const { return second; }
};

} // end namespace detail

template <typename T>
struct DenseMapInfo {
// static T getEmptyKey();
// static T getTombstoneKey();
// static unsigned getHashValue(const T &Val);
// static bool isEqual(const T &LHS, const T &RHS);
};

// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
// declared key types. Assume that no pointer key type requires more than 4096
// bytes of alignment.
template <typename T>
struct DenseMapInfo<T *> {
// The following should hold, but it would require T to be complete:
// static_assert(alignof(T) <= (1 << Log2MaxAlign),
// "DenseMap does not support pointer keys requiring more than "
// "Log2MaxAlign bits of alignment");
static constexpr uptr Log2MaxAlign = 12;

static constexpr T *getEmptyKey() {
uptr Val = static_cast<uptr>(-1);
Val <<= Log2MaxAlign;
return reinterpret_cast<T *>(Val);
}

static constexpr T *getTombstoneKey() {
uptr Val = static_cast<uptr>(-2);
Val <<= Log2MaxAlign;
return reinterpret_cast<T *>(Val);
}

static constexpr unsigned getHashValue(const T *PtrVal) {
return (unsigned((uptr)PtrVal) >> 4) ^ (unsigned((uptr)PtrVal) >> 9);
}

static constexpr bool isEqual(const T *LHS, const T *RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for chars.
template <>
struct DenseMapInfo<char> {
static constexpr char getEmptyKey() { return ~0; }
static constexpr char getTombstoneKey() { return ~0 - 1; }
static constexpr unsigned getHashValue(const char &Val) { return Val * 37U; }

static constexpr bool isEqual(const char &LHS, const char &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for unsigned chars.
template <>
struct DenseMapInfo<unsigned char> {
static constexpr unsigned char getEmptyKey() { return ~0; }
static constexpr unsigned char getTombstoneKey() { return ~0 - 1; }
static constexpr unsigned getHashValue(const unsigned char &Val) {
return Val * 37U;
}

static constexpr bool isEqual(const unsigned char &LHS,
const unsigned char &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for unsigned shorts.
template <>
struct DenseMapInfo<unsigned short> {
static constexpr unsigned short getEmptyKey() { return 0xFFFF; }
static constexpr unsigned short getTombstoneKey() { return 0xFFFF - 1; }
static constexpr unsigned getHashValue(const unsigned short &Val) {
return Val * 37U;
}

static constexpr bool isEqual(const unsigned short &LHS,
const unsigned short &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for unsigned ints.
template <>
struct DenseMapInfo<unsigned> {
static constexpr unsigned getEmptyKey() { return ~0U; }
static constexpr unsigned getTombstoneKey() { return ~0U - 1; }
static constexpr unsigned getHashValue(const unsigned &Val) {
return Val * 37U;
}

static constexpr bool isEqual(const unsigned &LHS, const unsigned &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for unsigned longs.
template <>
struct DenseMapInfo<unsigned long> {
static constexpr unsigned long getEmptyKey() { return ~0UL; }
static constexpr unsigned long getTombstoneKey() { return ~0UL - 1L; }

static constexpr unsigned getHashValue(const unsigned long &Val) {
return (unsigned)(Val * 37UL);
}

static constexpr bool isEqual(const unsigned long &LHS,
const unsigned long &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for unsigned long longs.
template <>
struct DenseMapInfo<unsigned long long> {
static constexpr unsigned long long getEmptyKey() { return ~0ULL; }
static constexpr unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }

static constexpr unsigned getHashValue(const unsigned long long &Val) {
return (unsigned)(Val * 37ULL);
}

static constexpr bool isEqual(const unsigned long long &LHS,
const unsigned long long &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for shorts.
template <>
struct DenseMapInfo<short> {
static constexpr short getEmptyKey() { return 0x7FFF; }
static constexpr short getTombstoneKey() { return -0x7FFF - 1; }
static constexpr unsigned getHashValue(const short &Val) { return Val * 37U; }
static constexpr bool isEqual(const short &LHS, const short &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for ints.
template <>
struct DenseMapInfo<int> {
static constexpr int getEmptyKey() { return 0x7fffffff; }
static constexpr int getTombstoneKey() { return -0x7fffffff - 1; }
static constexpr unsigned getHashValue(const int &Val) {
return (unsigned)(Val * 37U);
}

static constexpr bool isEqual(const int &LHS, const int &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for longs.
template <>
struct DenseMapInfo<long> {
static constexpr long getEmptyKey() {
return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
}

static constexpr long getTombstoneKey() { return getEmptyKey() - 1L; }

static constexpr unsigned getHashValue(const long &Val) {
return (unsigned)(Val * 37UL);
}

static constexpr bool isEqual(const long &LHS, const long &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for long longs.
template <>
struct DenseMapInfo<long long> {
static constexpr long long getEmptyKey() { return 0x7fffffffffffffffLL; }
static constexpr long long getTombstoneKey() {
return -0x7fffffffffffffffLL - 1;
}

static constexpr unsigned getHashValue(const long long &Val) {
return (unsigned)(Val * 37ULL);
}

static constexpr bool isEqual(const long long &LHS, const long long &RHS) {
return LHS == RHS;
}
};

// Provide DenseMapInfo for all pairs whose members have info.
template <typename T, typename U>
struct DenseMapInfo<detail::DenseMapPair<T, U>> {
using Pair = detail::DenseMapPair<T, U>;
using FirstInfo = DenseMapInfo<T>;
using SecondInfo = DenseMapInfo<U>;

static constexpr Pair getEmptyKey() {
return detail::DenseMapPair<T, U>(FirstInfo::getEmptyKey(),
SecondInfo::getEmptyKey());
}

static constexpr Pair getTombstoneKey() {
return detail::DenseMapPair<T, U>(FirstInfo::getTombstoneKey(),
SecondInfo::getTombstoneKey());
}

static constexpr unsigned getHashValue(const Pair &PairVal) {
return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),
SecondInfo::getHashValue(PairVal.second));
}

static constexpr bool isEqual(const Pair &LHS, const Pair &RHS) {
return FirstInfo::isEqual(LHS.first, RHS.first) &&
SecondInfo::isEqual(LHS.second, RHS.second);
}
};

} // namespace __sanitizer

#endif // SANITIZER_DENSE_MAP_INFO_H
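To key a DenseMap on a type not covered above, a client supplies the four-member contract sketched in the primary DenseMapInfo template. A hypothetical example; PcRange and its sentinel values are invented for illustration:

#include "sanitizer_common/sanitizer_dense_map_info.h"

struct PcRange {
  __sanitizer::uptr beg, end;
};

namespace __sanitizer {
template <>
struct DenseMapInfo<PcRange> {
  // Sentinels must never compare equal to a real key.
  static PcRange getEmptyKey() {
    return {static_cast<uptr>(-1), static_cast<uptr>(-1)};
  }
  static PcRange getTombstoneKey() {
    return {static_cast<uptr>(-2), static_cast<uptr>(-2)};
  }
  static unsigned getHashValue(const PcRange &R) {
    // Reuse the integer info and the 32-bit mixer defined above.
    return detail::combineHashValue(DenseMapInfo<uptr>::getHashValue(R.beg),
                                    DenseMapInfo<uptr>::getHashValue(R.end));
  }
  static bool isEqual(const PcRange &L, const PcRange &R) {
    return L.beg == R.beg && L.end == R.end;
  }
};
}  // namespace __sanitizer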
|
@ -19,6 +19,7 @@
|
||||
|
||||
#include "sanitizer_common.h"
|
||||
#include "sanitizer_file.h"
|
||||
# include "sanitizer_interface_internal.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
@ -83,8 +84,12 @@ static void RecursiveCreateParentDirs(char *path) {
|
||||
if (!IsPathSeparator(path[i]))
|
||||
continue;
|
||||
path[i] = '\0';
|
||||
/* Some of these will fail, because the directory exists, ignore it. */
|
||||
CreateDir(path);
|
||||
if (!DirExists(path) && !CreateDir(path)) {
|
||||
const char *ErrorMsgPrefix = "ERROR: Can't create directory: ";
|
||||
WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
|
||||
WriteToFile(kStderrFd, path, internal_strlen(path));
|
||||
Die();
|
||||
}
|
||||
path[i] = save;
|
||||
}
|
||||
}
|
||||
|
@ -15,7 +15,6 @@
|
||||
#ifndef SANITIZER_FILE_H
|
||||
#define SANITIZER_FILE_H
|
||||
|
||||
#include "sanitizer_interface_internal.h"
|
||||
#include "sanitizer_internal_defs.h"
|
||||
#include "sanitizer_libc.h"
|
||||
#include "sanitizer_mutex.h"
|
||||
@ -78,6 +77,7 @@ bool SupportsColoredOutput(fd_t fd);
|
||||
// OS
|
||||
const char *GetPwd();
|
||||
bool FileExists(const char *filename);
|
||||
bool DirExists(const char *path);
|
||||
char *FindPathToBinary(const char *name);
|
||||
bool IsPathSeparator(const char c);
|
||||
bool IsAbsolutePath(const char *path);
|
||||
|
@ -179,6 +179,7 @@ COMMON_FLAG(bool, use_madv_dontdump, true,
|
||||
"in core file.")
|
||||
COMMON_FLAG(bool, symbolize_inline_frames, true,
|
||||
"Print inlined frames in stacktraces. Defaults to true.")
|
||||
COMMON_FLAG(bool, demangle, true, "Print demangled symbols.")
|
||||
COMMON_FLAG(bool, symbolize_vs_style, false,
|
||||
"Print file locations in Visual Studio style (e.g: "
|
||||
" file(10,42): ...")
|
||||
@ -191,6 +192,8 @@ COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
|
||||
"Format string used to render stack frames. "
|
||||
"See sanitizer_stacktrace_printer.h for the format description. "
|
||||
"Use DEFAULT to get default format.")
|
||||
COMMON_FLAG(int, compress_stack_depot, 0,
|
||||
"Compress stack depot to save memory.")
|
||||
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
|
||||
"If true, the shadow is not allowed to use huge pages. ")
|
||||
COMMON_FLAG(bool, strict_string_checks, false,
|
||||
|
@ -14,17 +14,18 @@
|
||||
#include "sanitizer_fuchsia.h"
|
||||
#if SANITIZER_FUCHSIA
|
||||
|
||||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <zircon/errors.h>
|
||||
#include <zircon/process.h>
|
||||
#include <zircon/syscalls.h>
|
||||
#include <zircon/utc.h>
|
||||
# include <pthread.h>
|
||||
# include <stdlib.h>
|
||||
# include <unistd.h>
|
||||
# include <zircon/errors.h>
|
||||
# include <zircon/process.h>
|
||||
# include <zircon/syscalls.h>
|
||||
# include <zircon/utc.h>
|
||||
|
||||
#include "sanitizer_common.h"
|
||||
#include "sanitizer_libc.h"
|
||||
#include "sanitizer_mutex.h"
|
||||
# include "sanitizer_common.h"
|
||||
# include "sanitizer_interface_internal.h"
|
||||
# include "sanitizer_libc.h"
|
||||
# include "sanitizer_mutex.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
@ -89,7 +90,7 @@ void InitializePlatformEarly() {}
|
||||
void MaybeReexec() {}
|
||||
void CheckASLR() {}
|
||||
void CheckMPROTECT() {}
|
||||
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
|
||||
void PlatformPrepareForSandboxing(void *args) {}
|
||||
void DisableCoreDumperIfNecessary() {}
|
||||
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
|
||||
void SetAlternateSignalStack() {}
|
||||
@ -274,6 +275,15 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
bool MprotectNoAccess(uptr addr, uptr size) {
|
||||
return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
|
||||
}
|
||||
|
||||
bool MprotectReadOnly(uptr addr, uptr size) {
|
||||
return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
|
||||
ZX_OK;
|
||||
}
|
||||
|
||||
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
||||
const char *mem_type) {
|
||||
CHECK_GE(size, GetPageSize());
|
||||
@ -376,29 +386,8 @@ void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
|
||||
|
||||
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
|
||||
uptr *read_len, uptr max_len, error_t *errno_p) {
|
||||
zx_handle_t vmo;
|
||||
zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
|
||||
if (status == ZX_OK) {
|
||||
uint64_t vmo_size;
|
||||
status = _zx_vmo_get_size(vmo, &vmo_size);
|
||||
if (status == ZX_OK) {
|
||||
if (vmo_size < max_len)
|
||||
max_len = vmo_size;
|
||||
size_t map_size = RoundUpTo(max_len, GetPageSize());
|
||||
uintptr_t addr;
|
||||
status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
|
||||
map_size, &addr);
|
||||
if (status == ZX_OK) {
|
||||
*buff = reinterpret_cast<char *>(addr);
|
||||
*buff_size = map_size;
|
||||
*read_len = max_len;
|
||||
}
|
||||
}
|
||||
_zx_handle_close(vmo);
|
||||
}
|
||||
if (status != ZX_OK && errno_p)
|
||||
*errno_p = status;
|
||||
return status == ZX_OK;
|
||||
*errno_p = ZX_ERR_NOT_SUPPORTED;
|
||||
return false;
|
||||
}
|
||||
|
||||
void RawWrite(const char *buffer) {
|
||||
@ -475,6 +464,9 @@ u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
|
||||
|
||||
uptr GetRSS() { UNIMPLEMENTED(); }
|
||||
|
||||
void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
|
||||
void internal_join_thread(void *th) {}
|
||||
|
||||
void InitializePlatformCommonFlags(CommonFlags *cf) {}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
@ -20,104 +20,102 @@
|
||||
#include "sanitizer_internal_defs.h"
|
||||
|
||||
extern "C" {
|
||||
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
|
||||
// The special values are "stdout" and "stderr".
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_set_report_path(const char *path);
|
||||
// Tell the tools to write their reports to the provided file descriptor
|
||||
// (casted to void *).
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_set_report_fd(void *fd);
|
||||
// Get the current full report file path, if a path was specified by
|
||||
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
const char *__sanitizer_get_report_path();
|
||||
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
|
||||
// The special values are "stdout" and "stderr".
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_set_report_path(const char *path);
|
||||
// Tell the tools to write their reports to the provided file descriptor
|
||||
// (casted to void *).
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_set_report_fd(void *fd);
|
||||
// Get the current full report file path, if a path was specified by
|
||||
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
const char *__sanitizer_get_report_path();
|
||||
|
||||
typedef struct {
|
||||
int coverage_sandboxed;
|
||||
__sanitizer::sptr coverage_fd;
|
||||
unsigned int coverage_max_block_size;
|
||||
} __sanitizer_sandbox_arguments;
|
||||
typedef struct {
|
||||
int coverage_sandboxed;
|
||||
__sanitizer::sptr coverage_fd;
|
||||
unsigned int coverage_max_block_size;
|
||||
} __sanitizer_sandbox_arguments;
|
||||
|
||||
// Notify the tools that the sandbox is going to be turned on.
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
|
||||
// Notify the tools that the sandbox is going to be turned on.
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
|
||||
|
||||
// This function is called by the tool when it has just finished reporting
|
||||
// an error. 'error_summary' is a one-line string that summarizes
|
||||
// the error message. This function can be overridden by the client.
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
|
||||
void __sanitizer_report_error_summary(const char *error_summary);
|
||||
// This function is called by the tool when it has just finished reporting
|
||||
// an error. 'error_summary' is a one-line string that summarizes
|
||||
// the error message. This function can be overridden by the client.
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_report_error_summary(const char *error_summary);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
|
||||
const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
|
||||
const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
|
||||
|
||||
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
|
||||
// to ensure only one report is printed when multiple errors occur
|
||||
// simultaneously.
|
||||
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
|
||||
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
// to ensure only one report is printed when multiple errors occur
// simultaneously.
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
                                               const void *end,
                                               const void *old_mid,
                                               const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
                                            const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg, const void *mid, const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg, const void *end,
                                               const void *old_mid,
                                               const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
                                            const void *end);
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
                                                              const void *mid,
                                                              const void *end);

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_module_and_offset_for_pc(
    __sanitizer::uptr pc, char *module_path,
    __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
                                             __sanitizer::uptr module_path_len,
                                             void **pc_offset);

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_const_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_gep();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_indir();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
                                         __sanitizer::u32*);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_8bit_counters_init(char *, char *);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_bool_flag_init();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_pcs_init(const __sanitizer::uptr *,
                         const __sanitizer::uptr *);
} // extern "C"
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_const_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_switch();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_div4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_div8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_gep();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_indir();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_guard(__sanitizer::u32 *);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_guard_init(__sanitizer::u32 *, __sanitizer::u32 *);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_8bit_counters_init(char *, char *);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_bool_flag_init();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_pcs_init(const __sanitizer::uptr *, const __sanitizer::uptr *);
} // extern "C"

#endif // SANITIZER_INTERFACE_INTERNAL_H
@ -179,6 +179,7 @@ typedef int pid_t;

#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC || \
    (SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
    (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
    (SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__)))
typedef u64 OFF_T;
#else
@ -258,7 +259,9 @@ typedef u64 tid_t;
# define NOEXCEPT throw()
#endif

#if __has_cpp_attribute(clang::fallthrough)
#if __has_cpp_attribute(fallthrough)
# define FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
# define FALLTHROUGH [[clang::fallthrough]]
#else
# define FALLTHROUGH
@ -300,7 +303,8 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
  } \
} while (0)

#define RAW_CHECK(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__)
#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr "\n", )
#define RAW_CHECK_VA(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__)

#define CHECK_IMPL(c1, op, c2) \
  do { \
libsanitizer/sanitizer_common/sanitizer_leb128.h (new file, 87 lines)
@ -0,0 +1,87 @@
//===-- sanitizer_leb128.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LEB128_H
#define SANITIZER_LEB128_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"

namespace __sanitizer {

template <typename T, typename It>
It EncodeSLEB128(T value, It begin, It end) {
  bool more;
  do {
    u8 byte = value & 0x7f;
    // NOTE: this assumes that this signed shift is an arithmetic right shift.
    value >>= 7;
    more = !((((value == 0) && ((byte & 0x40) == 0)) ||
              ((value == -1) && ((byte & 0x40) != 0))));
    if (more)
      byte |= 0x80;
    if (UNLIKELY(begin == end))
      break;
    *(begin++) = byte;
  } while (more);
  return begin;
}

template <typename T, typename It>
It DecodeSLEB128(It begin, It end, T* v) {
  T value = 0;
  unsigned shift = 0;
  u8 byte;
  do {
    if (UNLIKELY(begin == end))
      return begin;
    byte = *(begin++);
    T slice = byte & 0x7f;
    value |= slice << shift;
    shift += 7;
  } while (byte >= 128);
  if (shift < 64 && (byte & 0x40))
    value |= (-1ULL) << shift;
  *v = value;
  return begin;
}

template <typename T, typename It>
It EncodeULEB128(T value, It begin, It end) {
  do {
    u8 byte = value & 0x7f;
    value >>= 7;
    if (value)
      byte |= 0x80;
    if (UNLIKELY(begin == end))
      break;
    *(begin++) = byte;
  } while (value);
  return begin;
}

template <typename T, typename It>
It DecodeULEB128(It begin, It end, T* v) {
  T value = 0;
  unsigned shift = 0;
  u8 byte;
  do {
    if (UNLIKELY(begin == end))
      return begin;
    byte = *(begin++);
    T slice = byte & 0x7f;
    value += slice << shift;
    shift += 7;
  } while (byte >= 128);
  *v = value;
  return begin;
}

} // namespace __sanitizer

#endif // SANITIZER_LEB128_H
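The templates above use the standard little-endian base-128 varint layout: seven payload bits per byte, the high bit set while more bytes follow, and (for SLEB128) bit 0x40 of the final byte carrying the sign. As a quick illustration, here is a minimal standalone C++ sketch of the unsigned round trip using only standard types; it mirrors EncodeULEB128/DecodeULEB128 above but is not part of the merged sources:

#include <cstdint>
#include <cstdio>
#include <vector>

// Encode one value: 7 payload bits per byte, 0x80 marks "more bytes follow".
static std::vector<uint8_t> EncodeU(uint64_t value) {
  std::vector<uint8_t> out;
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value)
      byte |= 0x80;  // Continuation bit.
    out.push_back(byte);
  } while (value);
  return out;
}

// Decode it back by accumulating 7-bit slices at increasing shifts.
static uint64_t DecodeU(const std::vector<uint8_t>& in) {
  uint64_t value = 0;
  unsigned shift = 0;
  for (uint8_t byte : in) {
    value |= (uint64_t)(byte & 0x7f) << shift;
    shift += 7;
    if (!(byte & 0x80))
      break;
  }
  return value;
}

int main() {
  uint64_t v = 624485;  // The classic example; encodes as e5 8e 26.
  std::vector<uint8_t> bytes = EncodeU(v);
  for (uint8_t b : bytes) std::printf("%02x ", b);
  std::printf("-> %llu\n", (unsigned long long)DecodeU(bytes));
}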
@ -80,6 +80,7 @@

#if SANITIZER_FREEBSD
#include <sys/exec.h>
#include <sys/procctl.h>
#include <sys/sysctl.h>
#include <machine/atomic.h>
extern "C" {
@ -162,6 +163,12 @@ ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
  // on any thread, setuid call hangs.
  // See test/sanitizer_common/TestCases/Linux/setuid.c.
  internal_sigdelset(&set, 33);
# endif
# if SANITIZER_LINUX
  // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls.
  // If this signal is blocked, such calls cannot be handled and the process may
  // hang.
  internal_sigdelset(&set, 31);
# endif
  SetSigProcMask(&set, &saved_);
  if (copy)
@ -226,7 +233,7 @@ uptr internal_close(fd_t fd) {
}

uptr internal_open(const char *filename, int flags) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
  return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
#else
  return internal_syscall(SYSCALL(open), (uptr)filename, flags);
@ -234,7 +241,7 @@ uptr internal_open(const char *filename, int flags) {
}

uptr internal_open(const char *filename, int flags, u32 mode) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
  return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
                          mode);
#else
@ -335,50 +342,46 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
uptr internal_stat(const char *path, void *buf) {
#if SANITIZER_FREEBSD
  return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
# if SANITIZER_WORDSIZE == 64
  return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                          0);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
# if defined(__mips64)
  // For mips64, stat syscall fills buffer in the format of kernel_stat
  struct kernel_stat kbuf;
  int res = internal_syscall(SYSCALL(stat), path, &kbuf);
  kernel_stat_to_stat(&kbuf, (struct stat *)buf);
# else
  struct stat64 buf64;
  int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
                             (uptr)&buf64, 0);
  stat64_to_stat(&buf64, (struct stat *)buf);
  return res;
# else
  return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
# endif
#else
# endif
# else
  struct stat64 buf64;
  int res = internal_syscall(SYSCALL(stat64), path, &buf64);
  stat64_to_stat(&buf64, (struct stat *)buf);
  return res;
#endif
# endif
}

uptr internal_lstat(const char *path, void *buf) {
#if SANITIZER_FREEBSD
  return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                          AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
# if defined(_LP64)
  return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                          AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
# if SANITIZER_MIPS64
  // For mips64, lstat syscall fills buffer in the format of kernel_stat
  struct kernel_stat kbuf;
  int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
  kernel_stat_to_stat(&kbuf, (struct stat *)buf);
# else
  struct stat64 buf64;
  int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
                             (uptr)&buf64, AT_SYMLINK_NOFOLLOW);
  stat64_to_stat(&buf64, (struct stat *)buf);
  return res;
# else
  return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
# endif
#else
# endif
# else
  struct stat64 buf64;
  int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
  stat64_to_stat(&buf64, (struct stat *)buf);
  return res;
#endif
# endif
}

uptr internal_fstat(fd_t fd, void *buf) {
@ -412,7 +415,7 @@ uptr internal_dup(int oldfd) {
}

uptr internal_dup2(int oldfd, int newfd) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
  return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
#else
  return internal_syscall(SYSCALL(dup2), oldfd, newfd);
@ -420,7 +423,7 @@ uptr internal_dup2(int oldfd, int newfd) {
}

uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
  return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
                          bufsize);
#else
@ -429,7 +432,7 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
}

uptr internal_unlink(const char *path) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
  return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
#else
  return internal_syscall(SYSCALL(unlink), (uptr)path);
@ -440,12 +443,12 @@ uptr internal_rename(const char *oldpath, const char *newpath) {
#if defined(__riscv) && defined(__linux__)
  return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
                          (uptr)newpath, 0);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
  return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
                          (uptr)newpath);
#else
# else
  return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
#endif
# endif
}

uptr internal_sched_yield() {
@ -482,17 +485,20 @@ bool FileExists(const char *filename) {
  if (ShouldMockFailureToOpen(filename))
    return false;
  struct stat st;
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
#else
  if (internal_stat(filename, &st))
#endif
    return false;
  // Sanity check: filename is a regular file.
  return S_ISREG(st.st_mode);
}

#if !SANITIZER_NETBSD
bool DirExists(const char *path) {
  struct stat st;
  if (internal_stat(path, &st))
    return false;
  return S_ISDIR(st.st_mode);
}

# if !SANITIZER_NETBSD
tid_t GetTid() {
#if SANITIZER_FREEBSD
  long Tid;
@ -691,17 +697,17 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
// Not used
#else
struct linux_dirent {
#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64
# if SANITIZER_X32 || SANITIZER_LINUX
  u64 d_ino;
  u64 d_off;
#else
# else
  unsigned long d_ino;
  unsigned long d_off;
#endif
# endif
  unsigned short d_reclen;
#if defined(__aarch64__) || SANITIZER_RISCV64
# if SANITIZER_LINUX
  unsigned char d_type;
#endif
# endif
  char d_name[256];
};
#endif
@ -737,11 +743,11 @@ int internal_dlinfo(void *handle, int request, void *p) {
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
#if SANITIZER_FREEBSD
  return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# elif SANITIZER_LINUX
  return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
#else
# else
  return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
#endif
# endif
}

uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
@ -759,11 +765,15 @@ uptr internal_sigaltstack(const void *ss, void *oss) {
}

int internal_fork() {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if SANITIZER_LINUX
# if SANITIZER_S390
  return internal_syscall(SYSCALL(clone), 0, SIGCHLD);
# else
  return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
#else
# endif
# else
  return internal_syscall(SYSCALL(fork));
#endif
# endif
}

#if SANITIZER_FREEBSD
@ -1380,7 +1390,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
#elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr) {
  long long res;
  register long long res __asm__("x0");
  if (!fn || !child_stack)
    return -EINVAL;
  CHECK_EQ(0, (uptr)child_stack % 16);
@ -1760,6 +1770,8 @@ HandleSignalMode GetHandleSignalMode(int signum) {

#if !SANITIZER_GO
void *internal_start_thread(void *(*func)(void *arg), void *arg) {
  if (&real_pthread_create == 0)
    return nullptr;
  // Start the thread with signals blocked, otherwise it can steal user signals.
  ScopedBlockSignals block(nullptr);
  void *th;
@ -1768,7 +1780,8 @@ void *internal_start_thread(void *(*func)(void *arg), void *arg) {
}

void internal_join_thread(void *th) {
  real_pthread_join(th, nullptr);
  if (&real_pthread_join)
    real_pthread_join(th, nullptr);
}
#else
void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
@ -1815,7 +1828,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#else
  uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
#endif // SANITIZER_FREEBSD
  return err & PF_WRITE ? WRITE : READ;
  return err & PF_WRITE ? Write : Read;
#elif defined(__mips__)
  uint32_t *exception_source;
  uint32_t faulty_instruction;
@ -1838,7 +1851,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
    case 0x2a:  // swl
    case 0x2e:  // swr
#endif
      return SignalContext::WRITE;
      return SignalContext::Write;

    case 0x20:  // lb
    case 0x24:  // lbu
@ -1853,27 +1866,27 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
    case 0x22:  // lwl
    case 0x26:  // lwr
#endif
      return SignalContext::READ;
      return SignalContext::Read;
#if __mips_isa_rev == 6
    case 0x3b:  // pcrel
      op_code = (faulty_instruction >> 19) & 0x3;
      switch (op_code) {
        case 0x1:  // lwpc
        case 0x2:  // lwupc
          return SignalContext::READ;
          return SignalContext::Read;
      }
#endif
  }
  return SignalContext::UNKNOWN;
  return SignalContext::Unknown;
#elif defined(__arm__)
  static const uptr FSR_WRITE = 1U << 11;
  uptr fsr = ucontext->uc_mcontext.error_code;
  return fsr & FSR_WRITE ? WRITE : READ;
  return fsr & FSR_WRITE ? Write : Read;
#elif defined(__aarch64__)
  static const u64 ESR_ELx_WNR = 1U << 6;
  u64 esr;
  if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
  return esr & ESR_ELx_WNR ? WRITE : READ;
  if (!Aarch64GetESR(ucontext, &esr)) return Unknown;
  return esr & ESR_ELx_WNR ? Write : Read;
#elif defined(__sparc__)
  // Decode the instruction to determine the access type.
  // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
@ -1889,7 +1902,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#endif
#endif
  u32 instr = *(u32 *)pc;
  return (instr >> 21) & 1 ? WRITE: READ;
  return (instr >> 21) & 1 ? Write: Read;
#elif defined(__riscv)
#if SANITIZER_FREEBSD
  unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
@ -1909,7 +1922,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_xlen == 64
      case 0b10'011:  // c.ldsp (rd != x0)
#endif
        return rd ? SignalContext::READ : SignalContext::UNKNOWN;
        return rd ? SignalContext::Read : SignalContext::Unknown;
      case 0b00'010:  // c.lw
#if __riscv_flen >= 32 && __riscv_xlen == 32
      case 0b10'011:  // c.flwsp
@ -1921,7 +1934,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
      case 0b00'001:  // c.fld
      case 0b10'001:  // c.fldsp
#endif
        return SignalContext::READ;
        return SignalContext::Read;
      case 0b00'110:  // c.sw
      case 0b10'110:  // c.swsp
#if __riscv_flen >= 32 || __riscv_xlen == 64
@ -1932,9 +1945,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
      case 0b00'101:  // c.fsd
      case 0b10'101:  // c.fsdsp
#endif
        return SignalContext::WRITE;
        return SignalContext::Write;
      default:
        return SignalContext::UNKNOWN;
        return SignalContext::Unknown;
    }
  }
#endif
@ -1952,9 +1965,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#endif
        case 0b100:  // lbu
        case 0b101:  // lhu
          return SignalContext::READ;
          return SignalContext::Read;
        default:
          return SignalContext::UNKNOWN;
          return SignalContext::Unknown;
      }
    case 0b0100011:  // stores
      switch (funct3) {
@ -1964,9 +1977,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_xlen == 64
        case 0b011:  // sd
#endif
          return SignalContext::WRITE;
          return SignalContext::Write;
        default:
          return SignalContext::UNKNOWN;
          return SignalContext::Unknown;
      }
#if __riscv_flen >= 32
    case 0b0000111:  // floating-point loads
@ -1975,9 +1988,9 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_flen == 64
        case 0b011:  // fld
#endif
          return SignalContext::READ;
          return SignalContext::Read;
        default:
          return SignalContext::UNKNOWN;
          return SignalContext::Unknown;
      }
    case 0b0100111:  // floating-point stores
      switch (funct3) {
@ -1985,17 +1998,17 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if __riscv_flen == 64
        case 0b011:  // fsd
#endif
          return SignalContext::WRITE;
          return SignalContext::Write;
        default:
          return SignalContext::UNKNOWN;
          return SignalContext::Unknown;
      }
#endif
    default:
      return SignalContext::UNKNOWN;
      return SignalContext::Unknown;
  }
#else
  (void)ucontext;
  return UNKNOWN;  // FIXME: Implement.
  return Unknown;  // FIXME: Implement.
#endif
}

@ -2070,12 +2083,19 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
  *sp = ucontext->uc_mcontext.gregs[REG_UESP];
# endif
#elif defined(__powerpc__) || defined(__powerpc64__)
# if SANITIZER_FREEBSD
  ucontext_t *ucontext = (ucontext_t *)context;
  *pc = ucontext->uc_mcontext.mc_srr0;
  *sp = ucontext->uc_mcontext.mc_frame[1];
  *bp = ucontext->uc_mcontext.mc_frame[31];
# else
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.regs->nip;
  *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
  // The powerpc{,64}-linux ABIs do not specify r31 as the frame
  // pointer, but GCC always uses r31 when we need a frame pointer.
  *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
# endif
#elif defined(__sparc__)
#if defined(__arch64__) || defined(__sparcv9)
#define STACK_BIAS 2047
@ -2164,49 +2184,34 @@ void CheckASLR() {
            GetArgv()[0]);
    Die();
  }
#elif SANITIZER_PPC64V2
  // Disable ASLR for Linux PPC64LE.
  int old_personality = personality(0xffffffff);
  if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
    VReport(1, "WARNING: Program is being run with address space layout "
               "randomization (ASLR) enabled which prevents the thread and "
               "memory sanitizers from working on powerpc64le.\n"
               "ASLR will be disabled and the program re-executed.\n");
    CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
    ReExec();
  }
#elif SANITIZER_FREEBSD
  int aslr_pie;
  uptr len = sizeof(aslr_pie);
#if SANITIZER_WORDSIZE == 64
  if (UNLIKELY(internal_sysctlbyname("kern.elf64.aslr.pie_enable",
                                     &aslr_pie, &len, NULL, 0) == -1)) {
  int aslr_status;
  if (UNLIKELY(procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status) == -1)) {
    // We're making things less 'dramatic' here since
    // the OID is not necessarily guaranteed to be here
    // the cmd is not necessarily guaranteed to be here
    // just yet regarding FreeBSD release
    return;
  }

  if (aslr_pie > 0) {
  if ((aslr_status & PROC_ASLR_ACTIVE) != 0) {
    Printf("This sanitizer is not compatible with enabled ASLR "
           "and binaries compiled with PIE\n");
    Die();
  }
#endif
  // there might be 32 bits compat for 64 bits
  if (UNLIKELY(internal_sysctlbyname("kern.elf32.aslr.pie_enable",
                                     &aslr_pie, &len, NULL, 0) == -1)) {
    return;
# elif SANITIZER_PPC64V2
  // Disable ASLR for Linux PPC64LE.
  int old_personality = personality(0xffffffff);
  if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
    VReport(1,
            "WARNING: Program is being run with address space layout "
            "randomization (ASLR) enabled which prevents the thread and "
            "memory sanitizers from working on powerpc64le.\n"
            "ASLR will be disabled and the program re-executed.\n");
    CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
    ReExec();
  }

  if (aslr_pie > 0) {
    Printf("This sanitizer is not compatible with enabled ASLR "
           "and binaries compiled with PIE\n");
    Die();
  }
#else
# else
  // Do nothing
#endif
# endif
}

void CheckMPROTECT() {
@ -55,6 +55,9 @@ struct ScopedBlockSignals {
  explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
  ~ScopedBlockSignals();

  ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete;
  ScopedBlockSignals(const ScopedBlockSignals &) = delete;

 private:
  __sanitizer_sigset_t saved_;
};
@ -216,7 +216,8 @@ void InitTlsSize() { }
// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
#if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
    !SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;

@ -319,7 +320,6 @@ static uptr TlsPreTcbSize() {
}
#endif

#if !SANITIZER_GO
namespace {
struct TlsBlock {
  uptr begin, end, align;
@ -407,9 +407,8 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
  *addr = ranges[l].begin;
  *size = ranges[r - 1].end - ranges[l].begin;
}
#endif  // !SANITIZER_GO
#endif  // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
        // SANITIZER_LINUX) && !SANITIZER_ANDROID
        // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO

#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
@ -478,7 +477,7 @@ static void GetTls(uptr *addr, uptr *size) {
  const uptr pre_tcb_size = TlsPreTcbSize();
  *addr = tp - pre_tcb_size;
  *size = g_tls_size + pre_tcb_size;
#elif SANITIZER_FREEBSD || SANITIZER_LINUX
#elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
  uptr align;
  GetStaticTlsBoundary(addr, size, &align);
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
@ -539,11 +538,6 @@ static void GetTls(uptr *addr, uptr *size) {
    *addr = (uptr)tcb->tcb_dtv[1];
  }
}
#elif SANITIZER_SOLARIS
  // FIXME
  *addr = 0;
  *size = 0;
#else
#error "Unknown OS"
#endif
}
@ -614,6 +608,34 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
      bool writable = phdr->p_flags & PF_W;
      cur_module.addAddressRange(cur_beg, cur_end, executable,
                                 writable);
    } else if (phdr->p_type == PT_NOTE) {
# ifdef NT_GNU_BUILD_ID
      uptr off = 0;
      while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
        auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
                                                          phdr->p_vaddr + off);
        constexpr auto kGnuNamesz = 4;  // "GNU" with NUL-byte.
        static_assert(kGnuNamesz % 4 == 0, "kGnuNameSize is aligned to 4.");
        if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {
          if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >
              phdr->p_memsz) {
            // Something is very wrong, bail out instead of reading potentially
            // arbitrary memory.
            break;
          }
          const char *name =
              reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);
          if (internal_memcmp(name, "GNU", 3) == 0) {
            const char *value = reinterpret_cast<const char *>(nhdr) +
                                sizeof(*nhdr) + kGnuNamesz;
            cur_module.setUuid(value, nhdr->n_descsz);
            break;
          }
        }
        off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
               RoundUpTo(nhdr->n_descsz, 4);
      }
# endif
    }
  }
  modules->push_back(cur_module);
@ -770,13 +792,9 @@ u32 GetNumberOfCPUs() {
#elif SANITIZER_SOLARIS
  return sysconf(_SC_NPROCESSORS_ONLN);
#else
#if defined(CPU_COUNT)
  cpu_set_t CPUs;
  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
  return CPU_COUNT(&CPUs);
#else
  return 1;
#endif
#endif
}
@ -57,8 +57,10 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,

uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr) {
  if (!fn || !child_stack)
    return -EINVAL;
  if (!fn || !child_stack) {
    errno = EINVAL;
    return -1;
  }
  CHECK_EQ(0, (uptr)child_stack % 16);
  // Minimum frame size.
#ifdef __s390x__
@ -71,9 +73,9 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
  // And pass parameters.
  ((unsigned long *)child_stack)[1] = (uptr)fn;
  ((unsigned long *)child_stack)[2] = (uptr)arg;
  register long res __asm__("r2");
  register uptr res __asm__("r2");
  register void *__cstack __asm__("r2") = child_stack;
  register int __flags __asm__("r3") = flags;
  register long __flags __asm__("r3") = flags;
  register int * __ptidptr __asm__("r4") = parent_tidptr;
  register int * __ctidptr __asm__("r5") = child_tidptr;
  register void * __newtls __asm__("r6") = newtls;
@ -113,6 +115,10 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
      "r"(__ctidptr),
      "r"(__newtls)
    : "memory", "cc");
  if (res >= (uptr)-4095) {
    errno = -res;
    return -1;
  }
  return res;
}
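The new tail of internal_clone applies the Linux raw-syscall convention: the kernel signals failure by returning a negated errno in the unsigned range [(uptr)-4095, (uptr)-1], while anything below that range is a success value (here, the child pid). A minimal illustrative helper in the same spirit, not taken from the merged sources:

#include <cerrno>

// Hypothetical helper: fold a raw syscall return value into the usual
// "-1 plus errno" libc convention. Values in [-4095, -1] are negated errnos.
static inline long SyscallResultToErrno(unsigned long res) {
  if (res >= (unsigned long)-4095) {
    errno = -(long)res;  // Recover the positive errno code.
    return -1;
  }
  return (long)res;  // Success value, e.g. the child pid from clone().
}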
libsanitizer/sanitizer_common/sanitizer_lzw.h (new file, 159 lines)
@ -0,0 +1,159 @@
//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Lempel–Ziv–Welch encoding/decoding
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LZW_H
#define SANITIZER_LZW_H

#include "sanitizer_dense_map.h"

namespace __sanitizer {

using LzwCodeType = u32;

template <class T, class ItIn, class ItOut>
ItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {
  using Substring =
      detail::DenseMapPair<LzwCodeType /* Prefix */, T /* Next input */>;

  // Sentinel value for substrings of len 1.
  static constexpr LzwCodeType kNoPrefix =
      Min(DenseMapInfo<Substring>::getEmptyKey().first,
          DenseMapInfo<Substring>::getTombstoneKey().first) -
      1;
  DenseMap<Substring, LzwCodeType> prefix_to_code;
  {
    // Add all substrings of len 1 as the initial dictionary.
    InternalMmapVector<T> dict_len1;
    for (auto it = begin; it != end; ++it)
      if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)
        dict_len1.push_back(*it);

    // Slightly helps with later delta encoding.
    Sort(dict_len1.data(), dict_len1.size());

    // For large sizeof(T) we have to store dict_len1. Smaller types like u8 can
    // just generate them.
    *out = dict_len1.size();
    ++out;

    for (uptr i = 0; i != dict_len1.size(); ++i) {
      // Remap after the Sort.
      prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;
      *out = dict_len1[i];
      ++out;
    }
    CHECK_EQ(prefix_to_code.size(), dict_len1.size());
  }

  if (begin == end)
    return out;

  // Main LZW encoding loop.
  LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;
  ++begin;
  for (auto it = begin; it != end; ++it) {
    // Extend match with the new item.
    auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());
    if (ins.second) {
      // This is a new substring, but emit the code for the current match
      // (before the extension). This allows the LZW decoder to recover the
      // dictionary.
      *out = match;
      ++out;
      // Reset the match to a single item, which must be already in the map.
      match = prefix_to_code.find({kNoPrefix, *it})->second;
    } else {
      // Already known, use as the current match.
      match = ins.first->second;
    }
  }

  *out = match;
  ++out;

  return out;
}

template <class T, class ItIn, class ItOut>
ItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {
  if (begin == end)
    return out;

  // Load dictionary of len 1 substrings. These correspond to the lowest codes.
  InternalMmapVector<T> dict_len1(*begin);
  ++begin;

  if (begin == end)
    return out;

  for (auto& v : dict_len1) {
    v = *begin;
    ++begin;
  }

  // Substrings of len 2 and up. Indexes are shifted because [0,
  // dict_len1.size()) is stored in dict_len1. Substrings get here after being
  // emitted to the output, so we can use output positions.
  InternalMmapVector<detail::DenseMapPair<ItOut /* begin. */, ItOut /* end */>>
      code_to_substr;

  // Copies already emitted substrings into the output again.
  auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {
    if (code < dict_len1.size()) {
      *out = dict_len1[code];
      ++out;
      return out;
    }
    const auto& s = code_to_substr[code - dict_len1.size()];

    for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;
    return out;
  };

  // Returns the length of the substring with the given code.
  auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {
    if (code < dict_len1.size())
      return 1;
    const auto& s = code_to_substr[code - dict_len1.size()];
    return s.second - s.first;
  };

  // Main LZW decoding loop.
  LzwCodeType prev_code = *begin;
  ++begin;
  out = copy(prev_code, out);
  for (auto it = begin; it != end; ++it) {
    LzwCodeType code = *it;
    auto start = out;
    if (code == dict_len1.size() + code_to_substr.size()) {
      // Special LZW case. The code is not in the dictionary yet. This is
      // possible only when the new substring is the same as the previous one
      // plus the first item of the previous substring. We can emit that in
      // two steps.
      out = copy(prev_code, out);
      *out = *start;
      ++out;
    } else {
      out = copy(code, out);
    }

    // Every time the encoder emits a code, it also creates a substring of
    // len + 1 including the first item of the just emitted substring. Do the
    // same here.
    uptr len = code_to_len(prev_code);
    code_to_substr.push_back({start - len, start + 1});

    prev_code = code;
  }
  return out;
}

} // namespace __sanitizer
#endif
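Both templates are dictionary coders: the encoder grows a prefix dictionary as it emits codes, and the decoder rebuilds the same dictionary from the substrings it has already expanded, which is why its output iterators must stay dereferenceable (plain pointers into a preallocated buffer work). A hedged usage sketch, assuming the sanitizer_common build context; the two-codes-per-item capacity bound is my own conservative estimate, not something the header states:

#include "sanitizer_lzw.h"

using namespace __sanitizer;

// Round-trip `size` u32 items through LzwEncode/LzwDecode and verify.
bool LzwRoundTrip(const u32 *data, uptr size) {
  // Encoded stream: dictionary size, dictionary entries (at most `size`),
  // then at most one code per input item.
  InternalMmapVector<LzwCodeType> codes(2 * size + 1);
  LzwCodeType *codes_end = LzwEncode<u32>(data, data + size, codes.data());
  InternalMmapVector<u32> decoded(size);
  u32 *decoded_end = LzwDecode<u32>(codes.data(), codes_end, decoded.data());
  return decoded_end == decoded.data() + size &&
         internal_memcmp(decoded.data(), data, size * sizeof(u32)) == 0;
}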
@ -25,6 +25,7 @@
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_platform_limits_posix.h"
@ -37,7 +38,7 @@
extern char **environ;
#endif

#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
#if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
@ -70,15 +71,7 @@ extern "C" {
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <malloc/malloc.h>
#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
# include <os/log.h>
#else
/* Without support for __builtin_os_log_format, fall back to the older
   method. */
# define OS_LOG_DEFAULT 0
# define os_log_error(A,B,C) \
    asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
#endif
#include <os/log.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
@ -273,30 +266,32 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,

static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
                                pid_t *pid) {
  fd_t master_fd = kInvalidFd;
  fd_t slave_fd = kInvalidFd;
  fd_t primary_fd = kInvalidFd;
  fd_t secondary_fd = kInvalidFd;

  auto fd_closer = at_scope_exit([&] {
    internal_close(master_fd);
    internal_close(slave_fd);
    internal_close(primary_fd);
    internal_close(secondary_fd);
  });

  // We need a new pseudoterminal to avoid buffering problems. The 'atos' tool
  // in particular detects when it's talking to a pipe and forgets to flush the
  // output stream after sending a response.
  master_fd = posix_openpt(O_RDWR);
  if (master_fd == kInvalidFd) return kInvalidFd;
  primary_fd = posix_openpt(O_RDWR);
  if (primary_fd == kInvalidFd)
    return kInvalidFd;

  int res = grantpt(master_fd) || unlockpt(master_fd);
  int res = grantpt(primary_fd) || unlockpt(primary_fd);
  if (res != 0) return kInvalidFd;

  // Use TIOCPTYGNAME instead of ptsname() to avoid threading problems.
  char slave_pty_name[128];
  res = ioctl(master_fd, TIOCPTYGNAME, slave_pty_name);
  char secondary_pty_name[128];
  res = ioctl(primary_fd, TIOCPTYGNAME, secondary_pty_name);
  if (res == -1) return kInvalidFd;

  slave_fd = internal_open(slave_pty_name, O_RDWR);
  if (slave_fd == kInvalidFd) return kInvalidFd;
  secondary_fd = internal_open(secondary_pty_name, O_RDWR);
  if (secondary_fd == kInvalidFd)
    return kInvalidFd;

  // File descriptor actions
  posix_spawn_file_actions_t acts;
@ -307,9 +302,9 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
    posix_spawn_file_actions_destroy(&acts);
  });

  res = posix_spawn_file_actions_adddup2(&acts, slave_fd, STDIN_FILENO) ||
        posix_spawn_file_actions_adddup2(&acts, slave_fd, STDOUT_FILENO) ||
        posix_spawn_file_actions_addclose(&acts, slave_fd);
  res = posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDIN_FILENO) ||
        posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDOUT_FILENO) ||
        posix_spawn_file_actions_addclose(&acts, secondary_fd);
  if (res != 0) return kInvalidFd;

  // Spawn attributes
@ -334,14 +329,14 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],

  // Disable echo in the new terminal, disable CR.
  struct termios termflags;
  tcgetattr(master_fd, &termflags);
  tcgetattr(primary_fd, &termflags);
  termflags.c_oflag &= ~ONLCR;
  termflags.c_lflag &= ~ECHO;
  tcsetattr(master_fd, TCSANOW, &termflags);
  tcsetattr(primary_fd, TCSANOW, &termflags);

  // On success, do not close master_fd on scope exit.
  fd_t fd = master_fd;
  master_fd = kInvalidFd;
  // On success, do not close primary_fd on scope exit.
  fd_t fd = primary_fd;
  primary_fd = kInvalidFd;

  return fd;
}
@ -398,6 +393,13 @@ bool FileExists(const char *filename) {
  return S_ISREG(st.st_mode);
}

bool DirExists(const char *path) {
  struct stat st;
  if (stat(path, &st))
    return false;
  return S_ISDIR(st.st_mode);
}

tid_t GetTid() {
  tid_t tid;
  pthread_threadid_np(nullptr, &tid);
@ -877,9 +879,9 @@ void LogFullErrorReport(const char *buffer) {
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if defined(__x86_64__) || defined(__i386__)
  ucontext_t *ucontext = static_cast<ucontext_t*>(context);
  return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
  return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? Write : Read;
#else
  return UNKNOWN;
  return Unknown;
#endif
}

@ -894,18 +896,14 @@ bool SignalContext::IsTrueFaultingAddress() const {
  (uptr)ptrauth_strip( \
      (void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
#else
#define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r
#define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r
#endif

static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
  ucontext_t *ucontext = (ucontext_t*)context;
# if defined(__aarch64__)
  *pc = AARCH64_GET_REG(pc);
# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
  *bp = AARCH64_GET_REG(fp);
# else
  *bp = AARCH64_GET_REG(lr);
# endif
  *sp = AARCH64_GET_REG(sp);
# elif defined(__x86_64__)
  *pc = ucontext->uc_mcontext->__ss.__rip;
@ -1057,12 +1055,12 @@ void MaybeReexec() {
  }

  // Verify that interceptors really work. We'll use dlsym to locate
  // "pthread_create", if interceptors are working, it should really point to
  // "wrap_pthread_create" within our own dylib.
  Dl_info info_pthread_create;
  void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create");
  RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));
  if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {
  // "puts", if interceptors are working, it should really point to
  // "wrap_puts" within our own dylib.
  Dl_info info_puts;
  void *dlopen_addr = dlsym(RTLD_DEFAULT, "puts");
  RAW_CHECK(dladdr(dlopen_addr, &info_puts));
  if (internal_strcmp(info.dli_fname, info_puts.dli_fname) != 0) {
    Report(
        "ERROR: Interceptors are not working. This may be because %s is "
        "loaded too late (e.g. via dlopen). Please launch the executable "
@ -1229,7 +1227,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,

  uptr largest_gap_found = 0;
  uptr max_occupied_addr = 0;
  VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
  VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
  uptr shadow_start =
      FindAvailableMemoryRange(space_size, alignment, granularity,
                               &largest_gap_found, &max_occupied_addr);
@ -1238,20 +1236,21 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
    VReport(
        2,
        "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
        largest_gap_found, max_occupied_addr);
        (void *)largest_gap_found, (void *)max_occupied_addr);
    uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
    if (new_max_vm < max_occupied_addr) {
      Report("Unable to find a memory range for dynamic shadow.\n");
      Report(
          "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
          "new_max_vm = %p\n",
          space_size, largest_gap_found, max_occupied_addr, new_max_vm);
          (void *)space_size, (void *)largest_gap_found,
          (void *)max_occupied_addr, (void *)new_max_vm);
      CHECK(0 && "cannot place shadow");
    }
    RestrictMemoryToMaxAddress(new_max_vm);
    high_mem_end = new_max_vm - 1;
    space_size = (high_mem_end >> shadow_scale) + left_padding;
    VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
    VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
    shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
                                            nullptr, nullptr);
    if (shadow_start == 0) {
@ -1331,7 +1330,7 @@ void SignalContext::DumpAllRegisters(void *context) {
#  define DUMPREG64(r) \
    Printf("%s = 0x%016llx  ", #r, ucontext->uc_mcontext->__ss.__ ## r);
#  define DUMPREGA64(r) \
    Printf("   %s = 0x%016llx  ", #r, AARCH64_GET_REG(r));
    Printf("   %s = 0x%016lx  ", #r, AARCH64_GET_REG(r));
#  define DUMPREG32(r) \
    Printf("%s = 0x%08x  ", #r, ucontext->uc_mcontext->__ss.__ ## r);
#  define DUMPREG_(r)   Printf(" "); DUMPREG(r);
@ -1401,7 +1400,7 @@ void DumpProcessMap() {
    char uuid_str[128];
    FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
    Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
           modules[i].max_executable_address(), modules[i].full_name(),
           modules[i].max_address(), modules[i].full_name(),
           ModuleArchToString(modules[i].arch()), uuid_str);
  }
  Printf("End of module map.\n");
@ -14,26 +14,6 @@

#include "sanitizer_common.h"
#include "sanitizer_platform.h"

/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
   TARGET_OS_MAC (we have no support for iOS in any form for these versions,
   so there's no ambiguity). */
#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
# define TARGET_OS_OSX 1
#endif

/* Other TARGET_OS_xxx are not present on earlier versions, define them to
   0 (we have no support for them; they are not valid targets anyway). */
#ifndef TARGET_OS_IOS
#define TARGET_OS_IOS 0
#endif
#ifndef TARGET_OS_TV
#define TARGET_OS_TV 0
#endif
#ifndef TARGET_OS_WATCH
#define TARGET_OS_WATCH 0
#endif

#if SANITIZER_MAC
#include "sanitizer_posix.h"

@ -20,25 +20,27 @@

namespace __sanitizer {

class MUTEX StaticSpinMutex {
class SANITIZER_MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
  void Lock() SANITIZER_ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
  void Unlock() SANITIZER_RELEASE() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() const CHECK_LOCKED() {
  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
  void LockSlow();
};

class MUTEX SpinMutex : public StaticSpinMutex {
class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
@ -156,12 +158,12 @@ class CheckedMutex {
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  void Lock() ACQUIRE() {
  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
@ -206,7 +208,21 @@ class MUTEX Mutex : CheckedMutex {
    }
  }

  void Unlock() RELEASE() {
  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    u64 state = atomic_load_relaxed(&state_);
    for (;;) {
      if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
        return false;
      // The mutex is not read-/write-locked, try to lock.
      if (LIKELY(atomic_compare_exchange_weak(
              &state_, &state, state | kWriterLock, memory_order_acquire))) {
        CheckedMutex::Lock();
        return true;
      }
    }
  }

  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
@ -234,7 +250,7 @@ class MUTEX Mutex : CheckedMutex {
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
@ -271,7 +287,7 @@ class MUTEX Mutex : CheckedMutex {
    }
  }

  void ReadUnlock() RELEASE_SHARED() {
  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
@ -297,13 +313,13 @@ class MUTEX Mutex : CheckedMutex {
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned.
  void CheckWriteLocked() const CHECK_LOCKED() {
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

@ -361,13 +377,13 @@ void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
class SANITIZER_SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;
@ -377,13 +393,14 @@ class SCOPED_LOCK GenericScopedLock {
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;
@ -393,10 +410,10 @@ class SCOPED_LOCK GenericScopedReadLock {
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedRWLock {
class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      ACQUIRE(mu)
      SANITIZER_ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
@ -404,7 +421,7 @@ class SCOPED_LOCK GenericScopedRWLock {
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    if (write_)
      mu_->Unlock();
    else
@ -1,110 +0,0 @@
//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() nor realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
#define SANITIZER_PERSISTENT_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

template <typename T>
class PersistentAllocator {
 public:
  T *alloc(uptr count = 1);
  uptr allocated() const { return atomic_load_relaxed(&mapped_size); }

  void TestOnlyUnmap();

 private:
  T *tryAlloc(uptr count);
  T *refillAndAlloc(uptr count);
  mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
  atomic_uintptr_t region_pos;  // Region allocator for Node's.
  atomic_uintptr_t region_end;
  atomic_uintptr_t mapped_size;

  struct BlockInfo {
    const BlockInfo *next;
    uptr ptr;
    uptr size;
  };
  const BlockInfo *curr;
};

template <typename T>
inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
  // Optimisic lock-free allocation, essentially try to bump the region ptr.
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    uptr size = count * sizeof(T);
    if (cmp == 0 || cmp + size > end)
      return nullptr;
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return reinterpret_cast<T *>(cmp);
  }
}

template <typename T>
inline T *PersistentAllocator<T>::alloc(uptr count) {
  // First, try to allocate optimisitically.
  T *s = tryAlloc(count);
  if (LIKELY(s))
    return s;
  return refillAndAlloc(count);
}

template <typename T>
inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
  // If failed, lock, retry and alloc new superblock.
  SpinMutexLock l(&mtx);
  for (;;) {
    T *s = tryAlloc(count);
    if (s)
      return s;
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr size = count * sizeof(T) + sizeof(BlockInfo);
    uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
    new_block->next = curr;
    new_block->ptr = mem;
    new_block->size = allocsz;
    curr = new_block;

    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);

    allocsz -= sizeof(BlockInfo);
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}

template <typename T>
void PersistentAllocator<T>::TestOnlyUnmap() {
  while (curr) {
    uptr mem = curr->ptr;
    uptr allocsz = curr->size;
    curr = curr->next;
    UnmapOrDie((void *)mem, allocsz);
  }
  internal_memset(this, 0, sizeof(*this));
}

} // namespace __sanitizer

#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
@ -22,103 +22,110 @@
// function declarations into a .S file which doesn't compile.
// https://crbug.com/1162741
#if __has_include(<features.h>) && !defined(__ANDROID__)
#include <features.h>
#  include <features.h>
#endif

#if defined(__linux__)
# define SANITIZER_LINUX 1
#  define SANITIZER_LINUX 1
#else
# define SANITIZER_LINUX 0
#  define SANITIZER_LINUX 0
#endif

#if defined(__GLIBC__)
# define SANITIZER_GLIBC 1
#  define SANITIZER_GLIBC 1
#else
# define SANITIZER_GLIBC 0
#  define SANITIZER_GLIBC 0
#endif

#if defined(__FreeBSD__)
# define SANITIZER_FREEBSD 1
#  define SANITIZER_FREEBSD 1
#else
# define SANITIZER_FREEBSD 0
#  define SANITIZER_FREEBSD 0
#endif

#if defined(__NetBSD__)
# define SANITIZER_NETBSD 1
#  define SANITIZER_NETBSD 1
#else
# define SANITIZER_NETBSD 0
#  define SANITIZER_NETBSD 0
#endif

#if defined(__sun__) && defined(__svr4__)
# define SANITIZER_SOLARIS 1
#  define SANITIZER_SOLARIS 1
#else
# define SANITIZER_SOLARIS 0
#  define SANITIZER_SOLARIS 0
#endif

#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
# if TARGET_OS_OSX
#  define SANITIZER_OSX 1
# else
#  define SANITIZER_OSX 0
# endif
# if TARGET_OS_IPHONE
#  define SANITIZER_IOS 1
# else
#  define SANITIZER_IOS 0
# endif
# if TARGET_OS_SIMULATOR
#  define SANITIZER_IOSSIM 1
# else
#  define SANITIZER_IOSSIM 0
# endif
#  define SANITIZER_MAC 1
#  include <TargetConditionals.h>
#  if TARGET_OS_OSX
#    define SANITIZER_OSX 1
#  else
#    define SANITIZER_OSX 0
#  endif
#  if TARGET_OS_IPHONE
#    define SANITIZER_IOS 1
#  else
#    define SANITIZER_IOS 0
#  endif
#  if TARGET_OS_SIMULATOR
#    define SANITIZER_IOSSIM 1
#  else
#    define SANITIZER_IOSSIM 0
#  endif
#else
# define SANITIZER_MAC 0
# define SANITIZER_IOS 0
# define SANITIZER_IOSSIM 0
# define SANITIZER_OSX 0
#  define SANITIZER_MAC 0
#  define SANITIZER_IOS 0
#  define SANITIZER_IOSSIM 0
#  define SANITIZER_OSX 0
#endif

#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
# define SANITIZER_WATCHOS 1
#  define SANITIZER_WATCHOS 1
#else
# define SANITIZER_WATCHOS 0
#  define SANITIZER_WATCHOS 0
#endif

#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
# define SANITIZER_TVOS 1
#  define SANITIZER_TVOS 1
#else
# define SANITIZER_TVOS 0
#  define SANITIZER_TVOS 0
#endif

#if defined(_WIN32)
# define SANITIZER_WINDOWS 1
#  define SANITIZER_WINDOWS 1
#else
# define SANITIZER_WINDOWS 0
#  define SANITIZER_WINDOWS 0
#endif

#if defined(_WIN64)
# define SANITIZER_WINDOWS64 1
#  define SANITIZER_WINDOWS64 1
#else
# define SANITIZER_WINDOWS64 0
#  define SANITIZER_WINDOWS64 0
#endif

#if defined(__ANDROID__)
# define SANITIZER_ANDROID 1
#  define SANITIZER_ANDROID 1
#else
# define SANITIZER_ANDROID 0
#  define SANITIZER_ANDROID 0
#endif

#if defined(__Fuchsia__)
|
||||
# define SANITIZER_FUCHSIA 1
|
||||
# define SANITIZER_FUCHSIA 1
|
||||
#else
|
||||
# define SANITIZER_FUCHSIA 0
|
||||
# define SANITIZER_FUCHSIA 0
|
||||
#endif
|
||||
|
||||
#define SANITIZER_POSIX \
|
||||
// Assume linux that is not glibc or android is musl libc.
|
||||
#if SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID
|
||||
# define SANITIZER_MUSL 1
|
||||
#else
|
||||
# define SANITIZER_MUSL 0
|
||||
#endif
|
||||
|
||||
#define SANITIZER_POSIX \
|
||||
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
|
||||
SANITIZER_NETBSD || SANITIZER_SOLARIS)
|
||||
SANITIZER_NETBSD || SANITIZER_SOLARIS)

#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
@ -127,58 +134,64 @@
#endif

#if SANITIZER_WORDSIZE == 64
# define FIRST_32_SECOND_64(a, b) (b)
#  define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
#  define FIRST_32_SECOND_64(a, b) (a)
#endif

#if defined(__x86_64__) && !defined(_LP64)
# define SANITIZER_X32 1
#  define SANITIZER_X32 1
#else
# define SANITIZER_X32 0
#  define SANITIZER_X32 0
#endif

#if defined(__x86_64__) || defined(_M_X64)
#  define SANITIZER_X64 1
#else
#  define SANITIZER_X64 0
#endif

#if defined(__i386__) || defined(_M_IX86)
# define SANITIZER_I386 1
#  define SANITIZER_I386 1
#else
# define SANITIZER_I386 0
#  define SANITIZER_I386 0
#endif

#if defined(__mips__)
# define SANITIZER_MIPS 1
# if defined(__mips64)
#  define SANITIZER_MIPS32 0
#  define SANITIZER_MIPS64 1
# else
#  define SANITIZER_MIPS32 1
#  define SANITIZER_MIPS64 0
# endif
#  define SANITIZER_MIPS 1
#  if defined(__mips64)
#    define SANITIZER_MIPS32 0
#    define SANITIZER_MIPS64 1
#  else
#    define SANITIZER_MIPS32 1
#    define SANITIZER_MIPS64 0
#  endif
#else
# define SANITIZER_MIPS 0
# define SANITIZER_MIPS32 0
# define SANITIZER_MIPS64 0
#  define SANITIZER_MIPS 0
#  define SANITIZER_MIPS32 0
#  define SANITIZER_MIPS64 0
#endif

#if defined(__s390__)
# define SANITIZER_S390 1
# if defined(__s390x__)
#  define SANITIZER_S390_31 0
#  define SANITIZER_S390_64 1
# else
#  define SANITIZER_S390_31 1
#  define SANITIZER_S390_64 0
# endif
#  define SANITIZER_S390 1
#  if defined(__s390x__)
#    define SANITIZER_S390_31 0
#    define SANITIZER_S390_64 1
#  else
#    define SANITIZER_S390_31 1
#    define SANITIZER_S390_64 0
#  endif
#else
# define SANITIZER_S390 0
# define SANITIZER_S390_31 0
# define SANITIZER_S390_64 0
#  define SANITIZER_S390 0
#  define SANITIZER_S390_31 0
#  define SANITIZER_S390_64 0
#endif

#if defined(__powerpc__)
# define SANITIZER_PPC 1
# if defined(__powerpc64__)
#  define SANITIZER_PPC32 0
#  define SANITIZER_PPC64 1
#  define SANITIZER_PPC 1
#  if defined(__powerpc64__)
#    define SANITIZER_PPC32 0
#    define SANITIZER_PPC64 1
// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
// big-endian, and uses v1 ABI (known for its function descriptors),
// while the new powerpc64le target is little-endian and uses v2.
@ -186,43 +199,49 @@
// (eg. big-endian v2), but you won't find such combinations in the wild
// (it'd require bootstrapping a whole system, which would be quite painful
// - there's no target triple for that). LLVM doesn't support them either.
#  if _CALL_ELF == 2
#   define SANITIZER_PPC64V1 0
#   define SANITIZER_PPC64V2 1
#    if _CALL_ELF == 2
#      define SANITIZER_PPC64V1 0
#      define SANITIZER_PPC64V2 1
#    else
#      define SANITIZER_PPC64V1 1
#      define SANITIZER_PPC64V2 0
#    endif
#  else
#   define SANITIZER_PPC64V1 1
#   define SANITIZER_PPC64V2 0
#    define SANITIZER_PPC32 1
#    define SANITIZER_PPC64 0
#    define SANITIZER_PPC64V1 0
#    define SANITIZER_PPC64V2 0
#  endif
# else
#  define SANITIZER_PPC32 1
#else
#  define SANITIZER_PPC 0
#  define SANITIZER_PPC32 0
#  define SANITIZER_PPC64 0
#  define SANITIZER_PPC64V1 0
#  define SANITIZER_PPC64V2 0
# endif
#else
# define SANITIZER_PPC 0
# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 0
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 0
#endif

#if defined(__arm__)
# define SANITIZER_ARM 1
#if defined(__arm__) || defined(_M_ARM)
#  define SANITIZER_ARM 1
#else
# define SANITIZER_ARM 0
#  define SANITIZER_ARM 0
#endif

#if defined(__aarch64__) || defined(_M_ARM64)
#  define SANITIZER_ARM64 1
#else
#  define SANITIZER_ARM64 0
#endif

#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
# define SANITIZER_SOLARIS32 1
#  define SANITIZER_SOLARIS32 1
#else
# define SANITIZER_SOLARIS32 0
#  define SANITIZER_SOLARIS32 0
#endif

#if defined(__riscv) && (__riscv_xlen == 64)
#define SANITIZER_RISCV64 1
#  define SANITIZER_RISCV64 1
#else
#define SANITIZER_RISCV64 0
#  define SANITIZER_RISCV64 0
#endif

// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
@ -231,62 +250,52 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
#  define SANITIZER_CAN_USE_ALLOCATOR64 1
# elif defined(__mips64) || defined(__aarch64__)
#  define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
#  define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
# endif
#  if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
#    define SANITIZER_CAN_USE_ALLOCATOR64 1
#  elif defined(__mips64) || defined(__aarch64__)
#    define SANITIZER_CAN_USE_ALLOCATOR64 0
#  else
#    define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
#  endif
#endif

// The range of addresses which can be returned by mmap.
// FIXME: this value should be different on different platforms. Larger values
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
#if SANITIZER_GO && defined(__mips64)
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
#endif
#elif SANITIZER_RISCV64
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_MAC
#  if SANITIZER_OSX || SANITIZER_IOSSIM
#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#  if SANITIZER_GO && defined(__mips64)
#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#  else
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
#  endif
#elif SANITIZER_RISCV64
#  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
#  if SANITIZER_MAC
#    if SANITIZER_OSX || SANITIZER_IOSSIM
#      define SANITIZER_MMAP_RANGE_SIZE \
        FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#    else
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
#      define SANITIZER_MMAP_RANGE_SIZE \
        FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
#    endif
# else
#  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
#  else
#    define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
#  endif
#elif defined(__sparc__)
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
#  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
#else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#  define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif
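The FIXME above is about real memory cost: a two-level byte map covering the whole mmap range keeps a first-level table with one pointer per second-level chunk, so that table grows linearly with SANITIZER_MMAP_RANGE_SIZE. A back-of-the-envelope sketch (the bytes-per-entry and leaf sizes here are arbitrary illustration values, not what the runtime actually uses):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical two-level byte map over a 47-bit address range:
  // one metadata byte per 4 KiB page, grouped into 1 MiB leaf arrays.
  const uint64_t kRangeSize = 1ULL << 47;      // SANITIZER_MMAP_RANGE_SIZE
  const uint64_t kBytesPerEntry = 1ULL << 12;  // one byte covers a page
  const uint64_t kLeafBytes = 1ULL << 20;      // second-level array size
  uint64_t total_entries = kRangeSize / kBytesPerEntry;  // 2^35 map bytes
  uint64_t num_leaves = total_entries / kLeafBytes;      // 2^15 leaves
  // The always-resident first level is one pointer per leaf (256 KiB here);
  // doubling the range doubles it.
  std::printf("level-1 table: %llu KiB\n",
              (unsigned long long)(num_leaves * sizeof(void *) / 1024));
  return 0;
}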

// Whether the addresses are sign-extended from the VMA range to the word.
// The SPARC64 Linux port implements this to split the VMA space into two
// non-contiguous halves with a huge hole in the middle.
#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
#  define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
#else
#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif
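To make "sign-extended" concrete: on such a port the top bits of a pointer replicate the highest VMA bit, so valid addresses cluster at the very bottom and very top of the 64-bit space, with the hole in between. A hypothetical helper (illustrative only; the VMA width and the function are not part of the runtime):

#include <cstdint>

// Hypothetical: canonicalize an address by sign-extending the top VMA bit,
// the way SPARC64 Linux forms its two address ranges around a central hole.
constexpr unsigned kVmaBits = 52;
constexpr uint64_t SignExtendVma(uint64_t addr) {
  const uint64_t sign = uint64_t{1} << (kVmaBits - 1);
  return (addr ^ sign) - sign;  // arithmetic sign-extension of bit 51
}
// An address with bit 51 set lands in the high half, just below 2^64:
static_assert(SignExtendVma(1ULL << 51) == 0xFFF8000000000000ULL,
              "upper half sits at the top of the address space");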

// The AArch64 and RISC-V linux ports use the canonical syscall set as
// mandated by the upstream linux community for all new ports. Other ports
// may still use legacy syscalls.
#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if (defined(__aarch64__) || defined(__riscv) || defined(__hexagon__)) && \
    SANITIZER_LINUX
#  define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
# else
#  define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
# endif
#  define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif

// uid16 syscalls can only be used when the following conditions are
@ -297,15 +306,15 @@
// Since we don't want to include libc headers here, we check the
// target only.
#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
#define SANITIZER_USES_UID16_SYSCALLS 1
#  define SANITIZER_USES_UID16_SYSCALLS 1
#else
#define SANITIZER_USES_UID16_SYSCALLS 0
#  define SANITIZER_USES_UID16_SYSCALLS 0
#endif

#if defined(__mips__)
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#  define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#else
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#  define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#endif

/// \macro MSC_PREREQ
@ -314,15 +323,15 @@
/// * 1800: Microsoft Visual Studio 2013 / 12.0
/// * 1900: Microsoft Visual Studio 2015 / 14.0
#ifdef _MSC_VER
# define MSC_PREREQ(version) (_MSC_VER >= (version))
#  define MSC_PREREQ(version) (_MSC_VER >= (version))
#else
# define MSC_PREREQ(version) 0
#  define MSC_PREREQ(version) 0
#endif
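Typical use is a compile-time version guard (a hypothetical snippet, not taken from the patch; the feature flag name is made up):

// The body compiles only with Visual Studio 2015 (MSVC 14.0) or newer;
// on non-MSVC compilers MSC_PREREQ(x) is always 0.
#if MSC_PREREQ(1900)
#  define HAVE_CONSTEXPR_MUTEX 1  // illustrative feature flag
#else
#  define HAVE_CONSTEXPR_MUTEX 0
#endif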

#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)
# define SANITIZER_NON_UNIQUE_TYPEINFO 0
#if SANITIZER_MAC && defined(__x86_64__)
#  define SANITIZER_NON_UNIQUE_TYPEINFO 0
#else
# define SANITIZER_NON_UNIQUE_TYPEINFO 1
#  define SANITIZER_NON_UNIQUE_TYPEINFO 1
#endif

// On linux, some architectures had an ABI transition from 64-bit long double
@ -330,11 +339,11 @@
// involving long doubles come in two versions, and we need to pass the
// correct one to dlvsym when intercepting them.
#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#  define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
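The mechanism referred to here is glibc symbol versioning: the same symbol name resolves to different implementations depending on the requested version string, so an interceptor that wants the old 64-bit-long-double variant must ask for it explicitly via dlvsym. A hedged illustration (the symbol choice is arbitrary; on targets where printf is not versioned at GLIBC_2.4 the second lookup simply returns NULL; compile on Linux and link with -ldl):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1  // dlvsym is a GNU extension
#endif
#include <dlfcn.h>
#include <cstdio>

int main() {
  void *libc = dlopen("libc.so.6", RTLD_LAZY);  // glibc soname on Linux
  if (!libc)
    return 1;
  // Unversioned lookup returns the current default; the versioned lookup
  // pins the variant that SANITIZER_NLDBL_VERSION selects on affected
  // targets (s390, ppc32, ppc64 ELFv1).
  void *def = dlsym(libc, "printf");
  void *old = dlvsym(libc, "printf", "GLIBC_2.4");
  std::printf("default printf %p, GLIBC_2.4 printf %p\n", def, old);
  dlclose(libc);
  return 0;
}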

#if SANITIZER_GO == 0
# define SANITIZER_GO 0
#  define SANITIZER_GO 0
#endif

// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
@ -342,40 +351,39 @@
// dlopen mallocs "libgcc_s.so" string which confuses LSan, it fails to realize
// that this allocation happens in dynamic linker and should be ignored.
#if SANITIZER_PPC || defined(__thumb__)
# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
#  define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
#else
# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
#  define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
#endif

#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS
# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || SANITIZER_SOLARIS
#  define SANITIZER_MADVISE_DONTNEED MADV_FREE
#else
# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
#  define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
#endif

// Older gcc have issues aligning to a constexpr, and require an integer.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
#if defined(__powerpc__) || defined(__powerpc64__)
# define SANITIZER_CACHE_LINE_SIZE 128
#  define SANITIZER_CACHE_LINE_SIZE 128
#else
# define SANITIZER_CACHE_LINE_SIZE 64
#  define SANITIZER_CACHE_LINE_SIZE 64
#endif

// Enable offline markup symbolizer for Fuchsia.
#if SANITIZER_FUCHSIA
# define SANITIZER_SYMBOLIZER_MARKUP 1
#else
#define SANITIZER_SYMBOLIZER_MARKUP 0
#  define SANITIZER_SYMBOLIZER_MARKUP 0
#endif

// Enable ability to support sanitizer initialization that is
// compatible with the sanitizer library being loaded via
// `dlopen()`.
#if SANITIZER_MAC
#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
#  define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
#else
#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
#  define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
#endif

// SANITIZER_SUPPORTS_THREADLOCAL
@ -392,4 +400,15 @@
# endif
#endif

#endif // SANITIZER_PLATFORM_H
#if defined(__thumb__) && defined(__linux__)
// Workaround for
// https://lab.llvm.org/buildbot/#/builders/clang-thumbv7-full-2stage
// or
// https://lab.llvm.org/staging/#/builders/clang-thumbv7-full-2stage
// It fails *rss_limit_mb_test* without meaningful errors.
#  define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 1
#else
#  define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0
#endif

#endif  // SANITIZER_PLATFORM_H

@ -235,6 +235,7 @@
#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
#define SANITIZER_INTERCEPT___B64_TO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_POSIX_SPAWN SI_POSIX
#define SANITIZER_INTERCEPT_WAIT SI_POSIX
#define SANITIZER_INTERCEPT_INET SI_POSIX
@ -465,6 +466,7 @@
#define SANITIZER_INTERCEPT_STAT \
  (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \
   SI_STAT_LINUX)
#define SANITIZER_INTERCEPT_STAT64 SI_STAT_LINUX
#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)
#define SANITIZER_INTERCEPT___XSTAT \
  ((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)
@ -130,7 +130,7 @@ unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_utimbuf_sz = sizeof(struct utimbuf);

@ -57,7 +57,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;

@ -26,10 +26,7 @@

// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
// Also, for some platforms (e.g. mips) there are additional members in the
// <sys/stat.h> struct stat:s.
// fine with newer headers, too.
#include <linux/posix_types.h>
# if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
#  include <sys/stat.h>

@ -554,7 +554,7 @@ unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_sembuf_sz = sizeof(struct sembuf);

@ -45,7 +45,7 @@ extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
unsigned ucontext_t_sz(void *ctx);

extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;

@ -170,8 +170,9 @@ typedef struct user_fpregs elf_fpregset_t;
#endif

// Include these after system headers to avoid name clashes and ambiguities.
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
# include "sanitizer_common.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_platform_limits_posix.h"

namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
@ -214,10 +215,24 @@ namespace __sanitizer {
#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
#endif // !SANITIZER_ANDROID

#if SANITIZER_LINUX
unsigned ucontext_t_sz(void *ctx) {
# if SANITIZER_GLIBC && SANITIZER_X64
  // See kernel arch/x86/kernel/fpu/signal.c for details.
  const auto *fpregs = static_cast<ucontext_t *>(ctx)->uc_mcontext.fpregs;
  // The member names differ across header versions, but the actual layout
  // is always the same. So avoid using members, just use arithmetic.
  const uint32_t *after_xmm =
      reinterpret_cast<const uint32_t *>(fpregs + 1) - 24;
  if (after_xmm[12] == FP_XSTATE_MAGIC1)
    return reinterpret_cast<const char *>(fpregs) + after_xmm[13] -
           static_cast<const char *>(ctx);
# endif
  return sizeof(ucontext_t);
}
# endif  // !SANITIZER_ANDROID
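The pointer arithmetic above leans on a fixed layout: the legacy fxsave image is 512 bytes, its last 24 32-bit words are software-reserved, and the kernel's _fpx_sw_bytes record (magic1, then the extended state size) occupies the final 48 bytes of that tail, i.e. words 12 and 13 — which is exactly what the after_xmm indices read. A small compile-time check of those assumptions (a sketch for Linux/x86-64 with glibc, where the fxsave image is exposed as struct _libc_fpstate; not part of the patch):

#include <cstdint>
#include <sys/ucontext.h>  // struct _libc_fpstate (glibc, x86-64)

// 512-byte fxsave image: 32-byte header + 8 x87 slots + 16 XMM slots
// + a 96-byte software-reserved tail.
static_assert(sizeof(_libc_fpstate) == 512, "fxsave image is 512 bytes");
// (fpregs + 1) - 24 u32 words lands on the 96-byte tail at offset 416;
// the kernel's _fpx_sw_bytes sits in its last 48 bytes, so magic1 and
// extended_size are u32 words 12 and 13 of that tail (offsets 464, 468).
static_assert(512 - 24 * sizeof(uint32_t) == 416, "tail starts at 416");
static_assert(416 + 12 * sizeof(uint32_t) == 464, "sw_reserved at 464");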

# if SANITIZER_LINUX
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned __user_cap_header_struct_sz =
@ -575,6 +590,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_BLKROGET = BLKROGET;
unsigned IOCTL_BLKROSET = BLKROSET;
unsigned IOCTL_BLKRRPART = BLKRRPART;
unsigned IOCTL_BLKFRASET = BLKFRASET;
unsigned IOCTL_BLKFRAGET = BLKFRAGET;
unsigned IOCTL_BLKSECTSET = BLKSECTSET;
unsigned IOCTL_BLKSECTGET = BLKSECTGET;
unsigned IOCTL_BLKSSZGET = BLKSSZGET;
unsigned IOCTL_BLKBSZGET = BLKBSZGET;
unsigned IOCTL_BLKBSZSET = BLKBSZSET;
unsigned IOCTL_BLKGETSIZE64 = BLKGETSIZE64;
unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ;
unsigned IOCTL_CDROMEJECT = CDROMEJECT;
unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW;
Some files were not shown because too many files have changed in this diff.