sanitizer_stacktrace.cc: Cherry pick upstream r209879.

	* sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream
	r209879.
	* sanitizer_common/sanitizer_common.h: Likewise.
	* asan/asan_mapping.h: Likewise.
	* asan/asan_linux.cc: Likewise.
	* tsan/tsan_mman.cc: Cherry pick upstream r209744.
	* sanitizer_common/sanitizer_allocator.h: Likewise.

From-SVN: r211080
Author: Jakub Jelinek <jakub@redhat.com> (committed by Jakub Jelinek)
Date:   2014-05-30 15:48:22 +02:00
commit ecd5543ffa, parent f9a8f6d9cb
7 changed files with 58 additions and 30 deletions

--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog

@@ -1,3 +1,13 @@
+2014-05-30  Jakub Jelinek  <jakub@redhat.com>
+
+	* sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream
+	r209879.
+	* sanitizer_common/sanitizer_common.h: Likewise.
+	* asan/asan_mapping.h: Likewise.
+	* asan/asan_linux.cc: Likewise.
+	* tsan/tsan_mman.cc: Cherry pick upstream r209744.
+	* sanitizer_common/sanitizer_allocator.h: Likewise.
+
 2014-05-23  Marek Polacek  <polacek@redhat.com>
 
 	* ubsan/ubsan_value.cc (getFloatValue): Handle 96-bit

--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc

@@ -186,6 +186,13 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
   *bp = ucontext->uc_mcontext.gregs[REG_EBP];
   *sp = ucontext->uc_mcontext.gregs[REG_ESP];
 # endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.regs->nip;
+  *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
+  // The powerpc{,64}-linux ABIs do not specify r31 as the frame
+  // pointer, but GCC always uses r31 when we need a frame pointer.
+  *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
 #elif defined(__sparc__)
   ucontext_t *ucontext = (ucontext_t*)context;
   uptr *stk_ptr;

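For reference, a minimal standalone sketch (not part of the commit; handler name and output are illustrative) of the same register extraction the hunk above performs on powerpc-linux, assuming the glibc/kernel headers that define PT_R1 and PT_R31:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>
#include <asm/ptrace.h>  // PT_R1, PT_R31

// Mirrors what GetPcSpBp() does for powerpc{,64} above.
static void SegvHandler(int, siginfo_t *, void *context) {
  ucontext_t *uc = static_cast<ucontext_t *>(context);
  unsigned long pc = uc->uc_mcontext.regs->nip;         // faulting pc
  unsigned long sp = uc->uc_mcontext.regs->gpr[PT_R1];  // r1 is the stack ptr
  // r31 is not an ABI-mandated frame pointer, but it is what GCC uses
  // when a frame pointer is needed (see the comment in the hunk above).
  unsigned long bp = uc->uc_mcontext.regs->gpr[PT_R31];
  fprintf(stderr, "pc=%#lx sp=%#lx bp=%#lx\n", pc, sp, bp);
  _exit(1);
}

int main() {
  struct sigaction sa = {};
  sa.sa_sigaction = SegvHandler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);
  *(volatile int *)0 = 0;  // trigger the handler
}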
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h

@@ -85,6 +85,7 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
 static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000;  // < 2G.
 static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
 static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
 
@@ -107,6 +108,8 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
 # else
 #  if defined(__aarch64__)
 #    define SHADOW_OFFSET kAArch64_ShadowOffset64
+#  elif defined(__powerpc64__)
+#    define SHADOW_OFFSET kPPC64_ShadowOffset64
 #  elif SANITIZER_FREEBSD
 #    define SHADOW_OFFSET kFreeBSD_ShadowOffset64
 #  elif SANITIZER_MAC

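The new kPPC64_ShadowOffset64 feeds ASan's mem-to-shadow mapping (the MEM_TO_SHADOW macro in this header; SHADOW_SCALE defaults to 3, so 8 application bytes share one shadow byte). A minimal sketch of the resulting arithmetic on powerpc64 — the function name here is illustrative, the real code is a macro:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kShadowScale = 3;  // 8 app bytes -> 1 shadow byte
constexpr uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;

// Sketch of MEM_TO_SHADOW for powerpc64 after this patch.
constexpr uint64_t MemToShadowPPC64(uint64_t addr) {
  return (addr >> kShadowScale) + kPPC64_ShadowOffset64;
}

int main() {
  uint64_t addr = 0x10012345678ULL;  // arbitrary application address
  printf("shadow(%#llx) = %#llx\n",
         (unsigned long long)addr,
         (unsigned long long)MemToShadowPPC64(addr));
}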
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h

@@ -196,14 +196,12 @@ template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
 
 // Memory allocator statistics
 enum AllocatorStat {
-  AllocatorStatMalloced,
-  AllocatorStatFreed,
-  AllocatorStatMmapped,
-  AllocatorStatUnmapped,
+  AllocatorStatAllocated,
+  AllocatorStatMapped,
   AllocatorStatCount
 };
 
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
 
 // Per-thread stats, live in per-thread cache.
 class AllocatorStats {
@@ -212,16 +210,21 @@ class AllocatorStats {
     internal_memset(this, 0, sizeof(*this));
   }
 
-  void Add(AllocatorStat i, u64 v) {
+  void Add(AllocatorStat i, uptr v) {
     v += atomic_load(&stats_[i], memory_order_relaxed);
     atomic_store(&stats_[i], v, memory_order_relaxed);
   }
 
-  void Set(AllocatorStat i, u64 v) {
+  void Sub(AllocatorStat i, uptr v) {
+    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
+    atomic_store(&stats_[i], v, memory_order_relaxed);
+  }
+
+  void Set(AllocatorStat i, uptr v) {
     atomic_store(&stats_[i], v, memory_order_relaxed);
   }
 
-  u64 Get(AllocatorStat i) const {
+  uptr Get(AllocatorStat i) const {
     return atomic_load(&stats_[i], memory_order_relaxed);
   }
 
@@ -229,7 +232,7 @@ class AllocatorStats {
   friend class AllocatorGlobalStats;
   AllocatorStats *next_;
   AllocatorStats *prev_;
-  atomic_uint64_t stats_[AllocatorStatCount];
+  atomic_uintptr_t stats_[AllocatorStatCount];
 };
 
 // Global stats, used for aggregation and querying.
@@ -258,7 +261,7 @@ class AllocatorGlobalStats : public AllocatorStats {
   }
 
   void Get(AllocatorStatCounters s) const {
-    internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
     SpinMutexLock l(&mu_);
     const AllocatorStats *stats = this;
     for (;;) {
@@ -268,6 +271,9 @@ class AllocatorGlobalStats : public AllocatorStats {
       if (stats == this)
         break;
     }
+    // All stats must be positive.
+    for (int i = 0; i < AllocatorStatCount; i++)
+      s[i] = ((sptr)s[i]) > 0 ? s[i] : 1;
   }
 
  private:
@@ -520,7 +526,7 @@ class SizeClassAllocator64 {
         map_size += kUserMapSize;
       CHECK_GE(region->mapped_user + map_size, end_idx);
       MapWithCallback(region_beg + region->mapped_user, map_size);
-      stat->Add(AllocatorStatMmapped, map_size);
+      stat->Add(AllocatorStatMapped, map_size);
       region->mapped_user += map_size;
     }
     uptr total_count = (region->mapped_user - beg_idx - size)
@@ -839,7 +845,7 @@ class SizeClassAllocator32 {
     uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                       "SizeClassAllocator32"));
     MapUnmapCallback().OnMap(res, kRegionSize);
-    stat->Add(AllocatorStatMmapped, kRegionSize);
+    stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
     possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
     return res;
@@ -905,7 +911,7 @@ struct SizeClassAllocatorLocalCache {
   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
-    stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+    stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     if (UNLIKELY(c->count == 0))
       Refill(allocator, class_id);
@@ -920,7 +926,7 @@ struct SizeClassAllocatorLocalCache {
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
     InitCache();
-    stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+    stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     CHECK_NE(c->max_count, 0UL);
     if (UNLIKELY(c->count == c->max_count))
@@ -1031,8 +1037,8 @@ class LargeMmapAllocator {
       stats.currently_allocated += map_size;
       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
       stats.by_size_log[size_log]++;
-      stat->Add(AllocatorStatMalloced, map_size);
-      stat->Add(AllocatorStatMmapped, map_size);
+      stat->Add(AllocatorStatAllocated, map_size);
+      stat->Add(AllocatorStatMapped, map_size);
     }
     return reinterpret_cast<void*>(res);
   }
@@ -1050,8 +1056,8 @@ class LargeMmapAllocator {
       chunks_sorted_ = false;
       stats.n_frees++;
       stats.currently_allocated -= h->map_size;
-      stat->Add(AllocatorStatFreed, h->map_size);
-      stat->Add(AllocatorStatUnmapped, h->map_size);
+      stat->Sub(AllocatorStatAllocated, h->map_size);
+      stat->Sub(AllocatorStatMapped, h->map_size);
     }
     MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);

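The allocator hunks replace paired Malloced/Freed (and Mmapped/Unmapped) counters with single Allocated/Mapped counters that are incremented on allocation and decremented on free. Because stats live in per-thread caches, one thread's counter can transiently wrap below zero when it frees memory another thread allocated, which is why AllocatorGlobalStats::Get() clamps the aggregated values. A self-contained sketch of that scheme (names are illustrative, not the sanitizer's own):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <vector>

struct ThreadStats {
  std::atomic<uintptr_t> allocated{0};
  // Like AllocatorStats::Add/Sub above: relaxed load + store, trading
  // occasional lost updates for zero synchronization cost.
  void Add(uintptr_t v) {
    allocated.store(allocated.load(std::memory_order_relaxed) + v,
                    std::memory_order_relaxed);
  }
  void Sub(uintptr_t v) {
    allocated.store(allocated.load(std::memory_order_relaxed) - v,
                    std::memory_order_relaxed);  // may wrap below zero
  }
};

// Like AllocatorGlobalStats::Get(): sum all threads, then clamp, since an
// individual thread's counter may have wrapped negative.
uintptr_t TotalAllocated(const std::vector<ThreadStats> &threads) {
  uintptr_t sum = 0;
  for (const ThreadStats &t : threads)
    sum += t.allocated.load(std::memory_order_relaxed);
  return (intptr_t)sum > 0 ? sum : 1;
}

int main() {
  std::vector<ThreadStats> threads(2);
  threads[0].Add(128);  // thread 0 allocates a chunk
  threads[1].Sub(128);  // thread 1 frees it: its own counter wraps negative
  threads[0].Add(64);
  printf("total allocated = %zu\n", (size_t)TotalAllocated(threads));  // 64
}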
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h

@@ -26,7 +26,11 @@ struct StackTrace;
 
 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
 const uptr kWordSizeInBits = 8 * kWordSize;
 
-const uptr kCacheLineSize = 64;
+#if defined(__powerpc__) || defined(__powerpc64__)
+const uptr kCacheLineSize = 128;
+#else
+const uptr kCacheLineSize = 64;
+#endif
 
 const uptr kMaxPathLength = 512;

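kCacheLineSize is used for padding and alignment that keeps per-thread hot data off shared cache lines; POWER processors use 128-byte cache lines, so the hard-coded 64 under-padded there. A minimal illustration of this kind of use (struct and variable names are illustrative):

#include <cstdio>

#if defined(__powerpc__) || defined(__powerpc64__)
constexpr unsigned kCacheLineSize = 128;  // POWER uses 128-byte lines
#else
constexpr unsigned kCacheLineSize = 64;
#endif

// Two counters touched by different threads land on different cache
// lines, so they never false-share.
struct alignas(kCacheLineSize) PaddedCounter {
  unsigned long value = 0;
};

PaddedCounter per_thread[2];

int main() {
  printf("sizeof(PaddedCounter) = %zu\n", sizeof(PaddedCounter));
}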
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc

@@ -16,11 +16,13 @@
 namespace __sanitizer {
 
 uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
-#ifdef __arm__
+#if defined(__arm__)
   // Cancel Thumb bit.
   pc = pc & (~1);
-#endif
-#if defined(__sparc__)
+#elif defined(__powerpc__) || defined(__powerpc64__)
+  // PCs are always 4 byte aligned.
+  return pc - 4;
+#elif defined(__sparc__)
   return pc - 8;
 #else
   return pc - 1;

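The hunk above folds the function into one #if/#elif chain and adds the powerpc case: every PowerPC instruction is 4 bytes and 4-byte aligned, so the instruction preceding a return address is exactly pc - 4, rather than the generic pc - 1 guess used on variable-width ISAs. A condensed sketch of the resulting logic (free function instead of the StackTrace member, the ARM Thumb-bit handling omitted):

#include <cstdio>

inline unsigned long PreviousInstructionPc(unsigned long pc) {
#if defined(__powerpc__) || defined(__powerpc64__)
  return pc - 4;  // fixed 4-byte, 4-byte-aligned instructions
#elif defined(__sparc__)
  return pc - 8;
#else
  return pc - 1;  // variable-width ISA: land inside the prior instruction
#endif
}

int main() {
  unsigned long ret_addr = 0x100003a4;  // illustrative return address
  printf("call site ~ %#lx\n", PreviousInstructionPc(ret_addr));
}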
--- a/libsanitizer/tsan/tsan_mman.cc
+++ b/libsanitizer/tsan/tsan_mman.cc

@@ -215,19 +215,15 @@ using namespace __tsan;
 
 extern "C" {
 uptr __tsan_get_current_allocated_bytes() {
-  u64 stats[AllocatorStatCount];
+  uptr stats[AllocatorStatCount];
   allocator()->GetStats(stats);
-  u64 m = stats[AllocatorStatMalloced];
-  u64 f = stats[AllocatorStatFreed];
-  return m >= f ? m - f : 1;
+  return stats[AllocatorStatAllocated];
 }
 
 uptr __tsan_get_heap_size() {
-  u64 stats[AllocatorStatCount];
+  uptr stats[AllocatorStatCount];
   allocator()->GetStats(stats);
-  u64 m = stats[AllocatorStatMmapped];
-  u64 f = stats[AllocatorStatUnmapped];
-  return m >= f ? m - f : 1;
+  return stats[AllocatorStatMapped];
 }
 
 uptr __tsan_get_free_bytes() {