path: root/lib/hwasan
author    Dimitry Andric <dim@FreeBSD.org>  2019-01-19 10:05:08 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2019-01-19 10:05:08 +0000
commit    0646903fc1f75f6e605754621119473ee083f4a4 (patch)
tree      57bce79a7423a054cccec23bdf6cd96e2d271b4a /lib/hwasan
parent    005b7ed8f76756d94ef6266ded755ab7863cb936 (diff)
download  src-0646903fc1f75f6e605754621119473ee083f4a4.tar.gz
          src-0646903fc1f75f6e605754621119473ee083f4a4.zip
Vendor import of compiler-rt trunk r351319 (just before the release_80 branch point)
Tags: vendor/compiler-rt/compiler-rt-trunk-r351319, vendor/compiler-rt/compiler-rt-release_80-r351543
Notes:
    svn path=/vendor/compiler-rt/dist/; revision=343175
    svn path=/vendor/compiler-rt/compiler-rt-release_80-r351543/; revision=343195; tag=vendor/compiler-rt/compiler-rt-release_80-r351543
Diffstat (limited to 'lib/hwasan')
-rw-r--r--  lib/hwasan/CMakeLists.txt              |  38
-rw-r--r--  lib/hwasan/hwasan.cc                   | 271
-rw-r--r--  lib/hwasan/hwasan.h                    |  36
-rw-r--r--  lib/hwasan/hwasan_allocator.cc         | 348
-rw-r--r--  lib/hwasan/hwasan_allocator.h          |  77
-rw-r--r--  lib/hwasan/hwasan_checks.h             |  80
-rw-r--r--  lib/hwasan/hwasan_dynamic_shadow.cc    |  30
-rw-r--r--  lib/hwasan/hwasan_flags.h              |   2
-rw-r--r--  lib/hwasan/hwasan_flags.inc            |  57
-rw-r--r--  lib/hwasan/hwasan_interceptors.cc      | 333
-rw-r--r--  lib/hwasan/hwasan_interface_internal.h |  75
-rw-r--r--  lib/hwasan/hwasan_linux.cc             | 231
-rw-r--r--  lib/hwasan/hwasan_mapping.h            |  63
-rw-r--r--  lib/hwasan/hwasan_memintrinsics.cc     |  45
-rw-r--r--  lib/hwasan/hwasan_new_delete.cc        |   2
-rw-r--r--  lib/hwasan/hwasan_poisoning.cc         |   6
-rw-r--r--  lib/hwasan/hwasan_poisoning.h          |   2
-rw-r--r--  lib/hwasan/hwasan_report.cc            | 412
-rw-r--r--  lib/hwasan/hwasan_report.h             |   8
-rw-r--r--  lib/hwasan/hwasan_thread.cc            | 105
-rw-r--r--  lib/hwasan/hwasan_thread.h             |  62
-rw-r--r--  lib/hwasan/hwasan_thread_list.cc       |  15
-rw-r--r--  lib/hwasan/hwasan_thread_list.h        | 200
23 files changed, 1681 insertions, 817 deletions
diff --git a/lib/hwasan/CMakeLists.txt b/lib/hwasan/CMakeLists.txt
index 42bf4366f192..20ab94dc0446 100644
--- a/lib/hwasan/CMakeLists.txt
+++ b/lib/hwasan/CMakeLists.txt
@@ -7,9 +7,11 @@ set(HWASAN_RTL_SOURCES
hwasan_dynamic_shadow.cc
hwasan_interceptors.cc
hwasan_linux.cc
+ hwasan_memintrinsics.cc
hwasan_poisoning.cc
hwasan_report.cc
hwasan_thread.cc
+ hwasan_thread_list.cc
)
set(HWASAN_RTL_CXX_SOURCES
@@ -25,8 +27,12 @@ set(HWASAN_RTL_HEADERS
hwasan_mapping.h
hwasan_poisoning.h
hwasan_report.h
- hwasan_thread.h)
+ hwasan_thread.h
+ hwasan_thread_list.h
+ )
+set(HWASAN_DEFINITIONS)
+append_list_if(COMPILER_RT_HWASAN_WITH_INTERCEPTORS HWASAN_WITH_INTERCEPTORS=1 HWASAN_DEFINITIONS)
set(HWASAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF HWASAN_RTL_CFLAGS)
@@ -36,6 +42,14 @@ append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding HWASAN_RTL_CFLA
set(HWASAN_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS})
+if(ANDROID)
+# Put most Sanitizer shared libraries in the global group. For more details, see
+# android-changes-for-ndk-developers.md#changes-to-library-search-order
+ if (COMPILER_RT_HAS_Z_GLOBAL)
+ list(APPEND HWASAN_DYNAMIC_LINK_FLAGS -Wl,-z,global)
+ endif()
+endif()
+
set(HWASAN_DYNAMIC_CFLAGS ${HWASAN_RTL_CFLAGS})
append_list_if(COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC
-ftls-model=initial-exec HWASAN_DYNAMIC_CFLAGS)
@@ -47,7 +61,10 @@ append_list_if(COMPILER_RT_HAS_LIBDL dl HWASAN_DYNAMIC_LIBS)
append_list_if(COMPILER_RT_HAS_LIBRT rt HWASAN_DYNAMIC_LIBS)
append_list_if(COMPILER_RT_HAS_LIBM m HWASAN_DYNAMIC_LIBS)
append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread HWASAN_DYNAMIC_LIBS)
-append_list_if(COMPILER_RT_HAS_LIBLOG log HWASAN_DYNAMIC_LIBS)
+
+if (TARGET cxx-headers OR HAVE_LIBCXX)
+ set(HWASAN_DEPS cxx-headers)
+endif()
# Static runtime library.
add_compiler_rt_component(hwasan)
@@ -56,23 +73,31 @@ add_compiler_rt_object_libraries(RTHwasan
ARCHS ${HWASAN_SUPPORTED_ARCH}
SOURCES ${HWASAN_RTL_SOURCES}
ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS}
- CFLAGS ${HWASAN_RTL_CFLAGS})
+ CFLAGS ${HWASAN_RTL_CFLAGS}
+ DEFS ${HWASAN_DEFINITIONS}
+ DEPS ${HWASAN_DEPS})
add_compiler_rt_object_libraries(RTHwasan_cxx
ARCHS ${HWASAN_SUPPORTED_ARCH}
SOURCES ${HWASAN_RTL_CXX_SOURCES}
ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS}
- CFLAGS ${HWASAN_RTL_CFLAGS})
+ CFLAGS ${HWASAN_RTL_CFLAGS}
+ DEFS ${HWASAN_DEFINITIONS}
+ DEPS ${HWASAN_DEPS})
add_compiler_rt_object_libraries(RTHwasan_dynamic
ARCHS ${HWASAN_SUPPORTED_ARCH}
SOURCES ${HWASAN_RTL_SOURCES} ${HWASAN_RTL_CXX_SOURCES}
ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS}
- CFLAGS ${HWASAN_DYNAMIC_CFLAGS})
+ CFLAGS ${HWASAN_DYNAMIC_CFLAGS}
+ DEFS ${HWASAN_DEFINITIONS}
+ DEPS ${HWASAN_DEPS})
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy.cc "")
add_compiler_rt_object_libraries(RTHwasan_dynamic_version_script_dummy
ARCHS ${HWASAN_SUPPORTED_ARCH}
SOURCES ${CMAKE_CURRENT_BINARY_DIR}/dummy.cc
- CFLAGS ${HWASAN_DYNAMIC_CFLAGS})
+ CFLAGS ${HWASAN_DYNAMIC_CFLAGS}
+ DEFS ${HWASAN_DEFINITIONS}
+ DEPS ${HWASAN_DEPS})
foreach(arch ${HWASAN_SUPPORTED_ARCH})
add_compiler_rt_runtime(clang_rt.hwasan
@@ -121,6 +146,7 @@ foreach(arch ${HWASAN_SUPPORTED_ARCH})
RTSanitizerCommonCoverage
RTSanitizerCommonSymbolizer
RTUbsan
+ RTUbsan_cxx
# The only purpose of RTHWAsan_dynamic_version_script_dummy is to
# carry a dependency of the shared runtime on the version script.
# Replacing it with a straightforward
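
The COMPILER_RT_HWASAN_WITH_INTERCEPTORS option added above reaches the sources as the HWASAN_WITH_INTERCEPTORS=1 define and is consumed as a plain preprocessor gate. A minimal sketch of that pattern, matching the guard that appears in the hwasan.h hunk further down:

    // Default the gate off when the build system does not define it
    // (this mirrors the guard added to hwasan.h in this same import).
    #ifndef HWASAN_WITH_INTERCEPTORS
    #define HWASAN_WITH_INTERCEPTORS 0
    #endif

    #if HWASAN_WITH_INTERCEPTORS
    // malloc/free interceptor aliases get compiled in
    // (see hwasan_interceptors.cc below).
    #endif
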
diff --git a/lib/hwasan/hwasan.cc b/lib/hwasan/hwasan.cc
index 7dab8249e3f7..e2bfea5e4226 100644
--- a/lib/hwasan/hwasan.cc
+++ b/lib/hwasan/hwasan.cc
@@ -13,19 +13,20 @@
//===----------------------------------------------------------------------===//
#include "hwasan.h"
-#include "hwasan_mapping.h"
+#include "hwasan_checks.h"
#include "hwasan_poisoning.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_init.h"
@@ -36,17 +37,17 @@ using namespace __sanitizer;
namespace __hwasan {
void EnterSymbolizer() {
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
CHECK(t);
t->EnterSymbolizer();
}
void ExitSymbolizer() {
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
CHECK(t);
t->LeaveSymbolizer();
}
bool IsInSymbolizer() {
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
return t && t->InSymbolizer();
}
@@ -57,6 +58,7 @@ Flags *flags() {
}
int hwasan_inited = 0;
+int hwasan_shadow_inited = 0;
bool hwasan_init_is_running;
int hwasan_report_count = 0;
@@ -86,7 +88,18 @@ static void InitializeFlags() {
cf.check_printf = false;
cf.intercept_tls_get_addr = true;
cf.exitcode = 99;
+ // Sigtrap is used in error reporting.
cf.handle_sigtrap = kHandleSignalExclusive;
+
+#if SANITIZER_ANDROID
+ // Let platform handle other signals. It is better at reporting them than we
+ // are.
+ cf.handle_segv = kHandleSignalNo;
+ cf.handle_sigbus = kHandleSignalNo;
+ cf.handle_abort = kHandleSignalNo;
+ cf.handle_sigill = kHandleSignalNo;
+ cf.handle_sigfpe = kHandleSignalNo;
+#endif
OverrideCommonFlags(cf);
}
@@ -119,7 +132,8 @@ static void InitializeFlags() {
#if HWASAN_CONTAINS_UBSAN
ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif
- VPrintf(1, "HWASAN_OPTIONS: %s\n", hwasan_options ? hwasan_options : "<empty>");
+ VPrintf(1, "HWASAN_OPTIONS: %s\n",
+ hwasan_options ? hwasan_options : "<empty>");
InitializeCommonFlags();
@@ -130,8 +144,13 @@ static void InitializeFlags() {
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
void *context, bool request_fast_unwind) {
- HwasanThread *t = GetCurrentThread();
- if (!t || !StackTrace::WillUseFastUnwind(request_fast_unwind)) {
+ Thread *t = GetCurrentThread();
+ if (!t) {
+ // the thread is still being created.
+ stack->size = 0;
+ return;
+ }
+ if (!StackTrace::WillUseFastUnwind(request_fast_unwind)) {
// Block reports from our interceptors during _Unwind_Backtrace.
SymbolizerScope sym_scope;
return stack->Unwind(max_s, pc, bp, context, 0, 0, request_fast_unwind);
@@ -140,11 +159,6 @@ void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
request_fast_unwind);
}
-void PrintWarning(uptr pc, uptr bp) {
- GET_FATAL_STACK_TRACE_PC_BP(pc, bp);
- ReportInvalidAccess(&stack, 0);
-}
-
static void HWAsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
@@ -153,6 +167,84 @@ static void HWAsanCheckFailed(const char *file, int line, const char *cond,
Die();
}
+static constexpr uptr kMemoryUsageBufferSize = 4096;
+
+static void HwasanFormatMemoryUsage(InternalScopedString &s) {
+ HwasanThreadList &thread_list = hwasanThreadList();
+ auto thread_stats = thread_list.GetThreadStats();
+ auto *sds = StackDepotGetStats();
+ AllocatorStatCounters asc;
+ GetAllocatorStats(asc);
+ s.append(
+ "HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
+ " thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
+ " heap: %zd",
+ internal_getpid(), GetRSS(), thread_stats.n_live_threads,
+ thread_stats.total_stack_size,
+ thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
+ sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]);
+}
+
+#if SANITIZER_ANDROID
+static char *memory_usage_buffer = nullptr;
+
+#define PR_SET_VMA 0x53564d41
+#define PR_SET_VMA_ANON_NAME 0
+
+static void InitMemoryUsage() {
+ memory_usage_buffer =
+ (char *)MmapOrDie(kMemoryUsageBufferSize, "memory usage string");
+ CHECK(memory_usage_buffer);
+ memory_usage_buffer[0] = '\0';
+ CHECK(internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
+ (uptr)memory_usage_buffer, kMemoryUsageBufferSize,
+ (uptr)memory_usage_buffer) == 0);
+}
+
+void UpdateMemoryUsage() {
+ if (!flags()->export_memory_stats)
+ return;
+ if (!memory_usage_buffer)
+ InitMemoryUsage();
+ InternalScopedString s(kMemoryUsageBufferSize);
+ HwasanFormatMemoryUsage(s);
+ internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
+ memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
+}
+#else
+void UpdateMemoryUsage() {}
+#endif
+
+struct FrameDescription {
+ uptr PC;
+ const char *Descr;
+};
+
+struct FrameDescriptionArray {
+ FrameDescription *beg, *end;
+};
+
+static InternalMmapVectorNoCtor<FrameDescriptionArray> AllFrames;
+
+void InitFrameDescriptors(uptr b, uptr e) {
+ FrameDescription *beg = reinterpret_cast<FrameDescription *>(b);
+ FrameDescription *end = reinterpret_cast<FrameDescription *>(e);
+ if (beg == end)
+ return;
+ AllFrames.push_back({beg, end});
+ if (Verbosity())
+ for (FrameDescription *frame_descr = beg; frame_descr < end; frame_descr++)
+ Printf("Frame: %p %s\n", frame_descr->PC, frame_descr->Descr);
+}
+
+const char *GetStackFrameDescr(uptr pc) {
+ for (uptr i = 0, n = AllFrames.size(); i < n; i++)
+ for (auto p = AllFrames[i].beg; p < AllFrames[i].end; p++)
+ if (p->PC == pc)
+ return p->Descr;
+ return nullptr;
+}
+
} // namespace __hwasan
// Interface.
@@ -161,6 +253,20 @@ using namespace __hwasan;
uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol.
+void __hwasan_shadow_init() {
+ if (hwasan_shadow_inited) return;
+ if (!InitShadow()) {
+ Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
+ DumpProcessMap();
+ Die();
+ }
+ hwasan_shadow_inited = 1;
+}
+
+void __hwasan_init_frames(uptr beg, uptr end) {
+ InitFrameDescriptors(beg, end);
+}
+
void __hwasan_init() {
CHECK(!hwasan_init_is_running);
if (hwasan_inited) return;
@@ -177,18 +283,20 @@ void __hwasan_init() {
__sanitizer_set_report_path(common_flags()->log_path);
+ AndroidTestTlsSlot();
+
DisableCoreDumperIfNecessary();
- if (!InitShadow()) {
- Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
- if (HWASAN_FIXED_MAPPING) {
- Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
- Printf("FATAL: Disabling ASLR is known to cause this error.\n");
- Printf("FATAL: If running under GDB, try "
- "'set disable-randomization off'.\n");
- }
- DumpProcessMap();
- Die();
- }
+
+ __hwasan_shadow_init();
+
+ InitThreads();
+ hwasanThreadList().CreateCurrentThread();
+
+ MadviseShadow();
+
+ SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
+ // This may call libc -> needs initialized shadow.
+ AndroidLogInit();
InitializeInterceptors();
InstallDeadlySignalHandlers(HwasanOnDeadlySignal);
@@ -198,14 +306,11 @@ void __hwasan_init() {
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
- HwasanTSDInit(HwasanTSDDtor);
+ HwasanTSDInit();
+ HwasanTSDThreadInit();
HwasanAllocatorInit();
- HwasanThread *main_thread = HwasanThread::Create(nullptr, nullptr);
- SetCurrentThread(main_thread);
- main_thread->ThreadStart();
-
#if HWASAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
@@ -216,9 +321,14 @@ void __hwasan_init() {
hwasan_inited = 1;
}
-void __hwasan_print_shadow(const void *x, uptr size) {
- // FIXME:
- Printf("FIXME: __hwasan_print_shadow unimplemented\n");
+void __hwasan_print_shadow(const void *p, uptr sz) {
+ uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
+ uptr shadow_first = MemToShadow(ptr_raw);
+ uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
+ Printf("HWASan shadow map for %zx .. %zx (pointer tag %x)\n", ptr_raw,
+ ptr_raw + sz, GetTagFromPointer((uptr)p));
+ for (uptr s = shadow_first; s <= shadow_last; ++s)
+ Printf(" %zx: %x\n", ShadowToMem(s), *(tag_t *)s);
}
sptr __hwasan_test_shadow(const void *p, uptr sz) {
@@ -227,12 +337,12 @@ sptr __hwasan_test_shadow(const void *p, uptr sz) {
tag_t ptr_tag = GetTagFromPointer((uptr)p);
if (ptr_tag == 0)
return -1;
- uptr ptr_raw = GetAddressFromPointer((uptr)p);
- uptr shadow_first = MEM_TO_SHADOW(ptr_raw);
- uptr shadow_last = MEM_TO_SHADOW(ptr_raw + sz - 1);
+ uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
+ uptr shadow_first = MemToShadow(ptr_raw);
+ uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
for (uptr s = shadow_first; s <= shadow_last; ++s)
if (*(tag_t*)s != ptr_tag)
- return SHADOW_TO_MEM(s) - ptr_raw;
+ return ShadowToMem(s) - ptr_raw;
return -1;
}
@@ -255,63 +365,6 @@ void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
*p = x;
}
-template<unsigned X>
-__attribute__((always_inline))
-static void SigTrap(uptr p) {
-#if defined(__aarch64__)
- (void)p;
- // 0x900 is added to do not interfere with the kernel use of lower values of
- // brk immediate.
- // FIXME: Add a constraint to put the pointer into x0, the same as x86 branch.
- asm("brk %0\n\t" ::"n"(0x900 + X));
-#elif defined(__x86_64__)
- // INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
- // total. The pointer is passed via rdi.
- // 0x40 is added as a safeguard, to help distinguish our trap from others and
- // to avoid 0 offsets in the command (otherwise it'll be reduced to a
- // different nop command, the three bytes one).
- asm volatile(
- "int3\n"
- "nopl %c0(%%rax)\n"
- :: "n"(0x40 + X), "D"(p));
-#else
- // FIXME: not always sigill.
- __builtin_trap();
-#endif
- // __builtin_unreachable();
-}
-
-enum class ErrorAction { Abort, Recover };
-enum class AccessType { Load, Store };
-
-template <ErrorAction EA, AccessType AT, unsigned LogSize>
-__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
- tag_t ptr_tag = GetTagFromPointer(p);
- uptr ptr_raw = p & ~kAddressTagMask;
- tag_t mem_tag = *(tag_t *)MEM_TO_SHADOW(ptr_raw);
- if (UNLIKELY(ptr_tag != mem_tag)) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + LogSize>(p);
- if (EA == ErrorAction::Abort) __builtin_unreachable();
- }
-}
-
-template <ErrorAction EA, AccessType AT>
-__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
- uptr sz) {
- CHECK_NE(0, sz);
- tag_t ptr_tag = GetTagFromPointer(p);
- uptr ptr_raw = p & ~kAddressTagMask;
- tag_t *shadow_first = (tag_t *)MEM_TO_SHADOW(ptr_raw);
- tag_t *shadow_last = (tag_t *)MEM_TO_SHADOW(ptr_raw + sz - 1);
- for (tag_t *t = shadow_first; t <= shadow_last; ++t)
- if (UNLIKELY(ptr_tag != *t)) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>(p);
- if (EA == ErrorAction::Abort) __builtin_unreachable();
- }
-}
-
void __hwasan_loadN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
}
@@ -392,10 +445,38 @@ void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
TagMemoryAligned(p, sz, tag);
}
+uptr __hwasan_tag_pointer(uptr p, u8 tag) {
+ return AddTagToPointer(p, tag);
+}
+
+void __hwasan_handle_longjmp(const void *sp_dst) {
+ uptr dst = (uptr)sp_dst;
+ // HWASan does not support tagged SP.
+ CHECK(GetTagFromPointer(dst) == 0);
+
+ uptr sp = (uptr)__builtin_frame_address(0);
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (dst < sp || dst - sp > kMaxExpectedCleanupSize) {
+ Report(
+ "WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: "
+ "stack top: %p; target %p; distance: %p (%zd)\n"
+ "False positive error reports may follow\n",
+ (void *)sp, (void *)dst, dst - sp);
+ return;
+ }
+ TagMemory(sp, dst - sp, 0);
+}
+
+void __hwasan_print_memory_usage() {
+ InternalScopedString s(kMemoryUsageBufferSize);
+ HwasanFormatMemoryUsage(s);
+ Printf("%s\n", s.data());
+}
+
static const u8 kFallbackTag = 0xBB;
u8 __hwasan_generate_tag() {
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
if (!t) return kFallbackTag;
return t->GenerateRandomTag();
}
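
A caller-side sketch of the __hwasan_test_shadow() contract shown above: it returns -1 when every 16-byte granule covering [p, p+sz) carries the pointer's tag, otherwise the offset of the first mismatching byte. The prototype is restated here so the example is self-contained; check_range is a hypothetical helper, not part of the import:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Mirrors the runtime's declaration (sptr/uptr are pointer-sized ints).
    extern "C" intptr_t __hwasan_test_shadow(const void *p, uintptr_t sz);

    void check_range(const void *p, size_t sz) {
      intptr_t off = __hwasan_test_shadow(p, sz);
      if (off < 0)
        std::printf("all granules match the pointer tag\n");
      else
        std::printf("first mismatching byte at offset %ld\n", (long)off);
    }
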
diff --git a/lib/hwasan/hwasan.h b/lib/hwasan/hwasan.h
index 47d1d057a0dd..ce9e904c5c69 100644
--- a/lib/hwasan/hwasan.h
+++ b/lib/hwasan/hwasan.h
@@ -30,6 +30,10 @@
# define HWASAN_CONTAINS_UBSAN CAN_SANITIZE_UB
#endif
+#ifndef HWASAN_WITH_INTERCEPTORS
+#define HWASAN_WITH_INTERCEPTORS 0
+#endif
+
typedef u8 tag_t;
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
@@ -37,16 +41,21 @@ typedef u8 tag_t;
const unsigned kAddressTagShift = 56;
const uptr kAddressTagMask = 0xFFUL << kAddressTagShift;
+// Minimal alignment of the shadow base address. Determines the space available
+// for threads and stack histories. This is an ABI constant.
+const unsigned kShadowBaseAlignment = 32;
+
static inline tag_t GetTagFromPointer(uptr p) {
return p >> kAddressTagShift;
}
-static inline uptr GetAddressFromPointer(uptr p) {
- return p & ~kAddressTagMask;
+static inline uptr UntagAddr(uptr tagged_addr) {
+ return tagged_addr & ~kAddressTagMask;
}
-static inline void * GetAddressFromPointer(const void *p) {
- return (void *)((uptr)p & ~kAddressTagMask);
+static inline void *UntagPtr(const void *tagged_ptr) {
+ return reinterpret_cast<void *>(
+ UntagAddr(reinterpret_cast<uptr>(tagged_ptr)));
}
static inline uptr AddTagToPointer(uptr p, tag_t tag) {
@@ -61,12 +70,13 @@ extern int hwasan_report_count;
bool ProtectRange(uptr beg, uptr end);
bool InitShadow();
+void InitThreads();
+void MadviseShadow();
char *GetProcSelfMaps();
void InitializeInterceptors();
void HwasanAllocatorInit();
void HwasanAllocatorThreadFinish();
-void HwasanDeallocate(StackTrace *stack, void *ptr);
void *hwasan_malloc(uptr size, StackTrace *stack);
void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack);
@@ -77,11 +87,13 @@ void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack);
int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack);
+void hwasan_free(void *ptr, StackTrace *stack);
void InstallTrapHandler();
void InstallAtExitHandler();
const char *GetStackOriginDescr(u32 id, uptr *pc);
+const char *GetStackFrameDescr(uptr pc);
void EnterSymbolizer();
void ExitSymbolizer();
@@ -92,8 +104,6 @@ struct SymbolizerScope {
~SymbolizerScope() { ExitSymbolizer(); }
};
-void PrintWarning(uptr pc, uptr bp);
-
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
void *context, bool request_fast_unwind);
@@ -135,13 +145,17 @@ class ScopedThreadLocalStateBackup {
u64 va_arg_overflow_size_tls;
};
-void HwasanTSDInit(void (*destructor)(void *tsd));
-void *HwasanTSDGet();
-void HwasanTSDSet(void *tsd);
-void HwasanTSDDtor(void *tsd);
+void HwasanTSDInit();
+void HwasanTSDThreadInit();
void HwasanOnDeadlySignal(int signo, void *info, void *context);
+void UpdateMemoryUsage();
+
+void AppendToErrorMessageBuffer(const char *buffer);
+
+void AndroidTestTlsSlot();
+
} // namespace __hwasan
#define HWASAN_MALLOC_HOOK(ptr, size) \
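
The renamed tagging helpers above are plain shift-and-mask arithmetic on the top byte (AArch64 TBI). A standalone sketch with the same constants, assuming AddTagToPointer clears the old tag before inserting the new one:

    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;
    using tag_t = uint8_t;

    constexpr unsigned kAddressTagShift = 56;
    constexpr uptr kAddressTagMask = 0xFFULL << kAddressTagShift;

    constexpr tag_t GetTagFromPointer(uptr p) { return p >> kAddressTagShift; }
    constexpr uptr UntagAddr(uptr p) { return p & ~kAddressTagMask; }
    constexpr uptr AddTagToPointer(uptr p, tag_t tag) {
      return UntagAddr(p) | (static_cast<uptr>(tag) << kAddressTagShift);
    }

    int main() {
      uptr raw = 0x0000007fdeadbeefULL;
      uptr tagged = AddTagToPointer(raw, 0x2a);
      assert(GetTagFromPointer(tagged) == 0x2a);  // tag lives in bits 63:56
      assert(UntagAddr(tagged) == raw);           // untagging is lossless
    }
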
diff --git a/lib/hwasan/hwasan_allocator.cc b/lib/hwasan/hwasan_allocator.cc
index c2b9b0b69589..8487ed7e1e55 100644
--- a/lib/hwasan/hwasan_allocator.cc
+++ b/lib/hwasan/hwasan_allocator.cc
@@ -12,10 +12,6 @@
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_allocator_checks.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -23,30 +19,53 @@
#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
-#include "hwasan_poisoning.h"
+#include "hwasan_report.h"
+
+#if HWASAN_WITH_INTERCEPTORS
+DEFINE_REAL(void *, realloc, void *ptr, uptr size)
+DEFINE_REAL(void, free, void *ptr)
+#endif
namespace __hwasan {
-enum {
- CHUNK_INVALID = 0,
- CHUNK_FREE = 1,
- CHUNK_ALLOCATED = 2
-};
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static SpinMutex fallback_mutex;
+static atomic_uint8_t hwasan_allocator_tagging_enabled;
+
+static const tag_t kFallbackAllocTag = 0xBB;
+static const tag_t kFallbackFreeTag = 0xBC;
-struct Metadata {
- u64 state : 2;
- u64 requested_size : 62;
- u32 alloc_context_id;
- u32 free_context_id;
+enum RightAlignMode {
+ kRightAlignNever,
+ kRightAlignSometimes,
+ kRightAlignAlways
};
-bool HwasanChunkView::IsValid() const {
- return metadata_ && metadata_->state != CHUNK_INVALID;
-}
+// These two variables are initialized from flags()->malloc_align_right
+// in HwasanAllocatorInit and are never changed afterwards.
+static RightAlignMode right_align_mode = kRightAlignNever;
+static bool right_align_8 = false;
+
+// Initialized in HwasanAllocatorInit, and never changed.
+static ALIGNED(16) u8 tail_magic[kShadowAlignment];
+
bool HwasanChunkView::IsAllocated() const {
- return metadata_ && metadata_->state == CHUNK_ALLOCATED;
+ return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
+}
+
+// Aligns the 'addr' right to the granule boundary.
+static uptr AlignRight(uptr addr, uptr requested_size) {
+ uptr tail_size = requested_size % kShadowAlignment;
+ if (!tail_size) return addr;
+ if (right_align_8)
+ return tail_size > 8 ? addr : addr + 8;
+ return addr + kShadowAlignment - tail_size;
}
+
uptr HwasanChunkView::Beg() const {
+ if (metadata_ && metadata_->right_aligned)
+ return AlignRight(block_, metadata_->requested_size);
return block_;
}
uptr HwasanChunkView::End() const {
@@ -58,88 +77,79 @@ uptr HwasanChunkView::UsedSize() const {
u32 HwasanChunkView::GetAllocStackId() const {
return metadata_->alloc_context_id;
}
-u32 HwasanChunkView::GetFreeStackId() const {
- return metadata_->free_context_id;
-}
-
-struct HwasanMapUnmapCallback {
- void OnMap(uptr p, uptr size) const {}
- void OnUnmap(uptr p, uptr size) const {
- // We are about to unmap a chunk of user memory.
- // It can return as user-requested mmap() or another thread stack.
- // Make it accessible with zero-tagged pointer.
- TagMemory(p, size, 0);
- }
-};
-#if !defined(__aarch64__) && !defined(__x86_64__)
-#error Unsupported platform
-#endif
-
-static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
-static const uptr kRegionSizeLog = 20;
-static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-
-struct AP32 {
- static const uptr kSpaceBeg = 0;
- static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
- static const uptr kMetadataSize = sizeof(Metadata);
- typedef __sanitizer::CompactSizeClassMap SizeClassMap;
- static const uptr kRegionSizeLog = __hwasan::kRegionSizeLog;
- typedef __hwasan::ByteMap ByteMap;
- typedef HwasanMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
-};
-typedef SizeClassAllocator32<AP32> PrimaryAllocator;
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<HwasanMapUnmapCallback> SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
+uptr HwasanChunkView::ActualSize() const {
+ return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
+}
-static Allocator allocator;
-static AllocatorCache fallback_allocator_cache;
-static SpinMutex fallback_mutex;
-static atomic_uint8_t hwasan_allocator_tagging_enabled;
+bool HwasanChunkView::FromSmallHeap() const {
+ return allocator.FromPrimary(reinterpret_cast<void *>(block_));
+}
-static const tag_t kFallbackAllocTag = 0xBB;
-static const tag_t kFallbackFreeTag = 0xBC;
+void GetAllocatorStats(AllocatorStatCounters s) {
+ allocator.GetStats(s);
+}
void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ switch (flags()->malloc_align_right) {
+ case 0: break;
+ case 1:
+ right_align_mode = kRightAlignSometimes;
+ right_align_8 = false;
+ break;
+ case 2:
+ right_align_mode = kRightAlignAlways;
+ right_align_8 = false;
+ break;
+ case 8:
+ right_align_mode = kRightAlignSometimes;
+ right_align_8 = true;
+ break;
+ case 9:
+ right_align_mode = kRightAlignAlways;
+ right_align_8 = true;
+ break;
+ default:
+ Report("ERROR: unsupported value of malloc_align_right flag: %d\n",
+ flags()->malloc_align_right);
+ Die();
+ }
+ for (uptr i = 0; i < kShadowAlignment; i++)
+ tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}
-AllocatorCache *GetAllocatorCache(HwasanThreadLocalMallocStorage *ms) {
- CHECK(ms);
- CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
- return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
+ allocator.SwallowCache(cache);
}
-void HwasanThreadLocalMallocStorage::CommitBack() {
- allocator.SwallowCache(GetAllocatorCache(this));
+static uptr TaggedSize(uptr size) {
+ if (!size) size = 1;
+ uptr new_size = RoundUpTo(size, kShadowAlignment);
+ CHECK_GE(new_size, size);
+ return new_size;
}
-static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
- bool zeroise) {
- alignment = Max(alignment, kShadowAlignment);
- size = RoundUpTo(size, kShadowAlignment);
-
- if (size > kMaxAllowedMallocSize) {
+static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
+ bool zeroise) {
+ if (orig_size > kMaxAllowedMallocSize) {
if (AllocatorMayReturnNull()) {
Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
- size);
+ orig_size);
return nullptr;
}
- ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack);
+ ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
}
- HwasanThread *t = GetCurrentThread();
+
+ alignment = Max(alignment, kShadowAlignment);
+ uptr size = TaggedSize(orig_size);
+ Thread *t = GetCurrentThread();
void *allocated;
if (t) {
- AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
- allocated = allocator.Allocate(cache, size, alignment);
+ allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
@@ -153,11 +163,18 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
}
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
- meta->state = CHUNK_ALLOCATED;
- meta->requested_size = size;
+ meta->requested_size = static_cast<u32>(orig_size);
meta->alloc_context_id = StackDepotPut(*stack);
- if (zeroise)
+ meta->right_aligned = false;
+ if (zeroise) {
internal_memset(allocated, 0, size);
+ } else if (flags()->max_malloc_fill_size > 0) {
+ uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
+ internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
+ }
+ if (!right_align_mode)
+ internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
+ size - orig_size);
void *user_ptr = allocated;
if (flags()->tag_in_malloc &&
@@ -165,74 +182,101 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
user_ptr = (void *)TagMemoryAligned(
(uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag);
+ if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
+ right_align_mode) {
+ uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
+ if (right_align_mode == kRightAlignAlways ||
+ GetTagFromPointer(as_uptr) & 1) { // use a tag bit as a random bit.
+ user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
+ meta->right_aligned = 1;
+ }
+ }
+
HWASAN_MALLOC_HOOK(user_ptr, size);
return user_ptr;
}
-void HwasanDeallocate(StackTrace *stack, void *user_ptr) {
- CHECK(user_ptr);
- HWASAN_FREE_HOOK(user_ptr);
+static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
+ CHECK(tagged_ptr);
+ tag_t ptr_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
+ tag_t mem_tag = *reinterpret_cast<tag_t *>(
+ MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
+ return ptr_tag == mem_tag;
+}
+
+static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
+ CHECK(tagged_ptr);
+ HWASAN_FREE_HOOK(tagged_ptr);
+
+ if (!PointerAndMemoryTagsMatch(tagged_ptr))
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+
+ void *untagged_ptr = UntagPtr(tagged_ptr);
+ void *aligned_ptr = reinterpret_cast<void *>(
+ RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+ uptr orig_size = meta->requested_size;
+ u32 free_context_id = StackDepotPut(*stack);
+ u32 alloc_context_id = meta->alloc_context_id;
+
+ // Check tail magic.
+ uptr tagged_size = TaggedSize(orig_size);
+ if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) {
+ uptr tail_size = tagged_size - orig_size;
+ CHECK_LT(tail_size, kShadowAlignment);
+ void *tail_beg = reinterpret_cast<void *>(
+ reinterpret_cast<uptr>(aligned_ptr) + orig_size);
+ if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
+ ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
+ orig_size, tail_size, tail_magic);
+ }
- void *p = GetAddressFromPointer(user_ptr);
- Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
- uptr size = meta->requested_size;
- meta->state = CHUNK_FREE;
meta->requested_size = 0;
- meta->free_context_id = StackDepotPut(*stack);
+ meta->alloc_context_id = 0;
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
+ if (flags()->max_free_fill_size > 0) {
+ uptr fill_size =
+ Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
+ internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
+ }
if (flags()->tag_in_free &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
- TagMemoryAligned((uptr)p, size,
+ TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
t ? t->GenerateRandomTag() : kFallbackFreeTag);
if (t) {
- AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
- allocator.Deallocate(cache, p);
+ allocator.Deallocate(t->allocator_cache(), aligned_ptr);
+ if (auto *ha = t->heap_allocations())
+ ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
+ free_context_id, static_cast<u32>(orig_size)});
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
- allocator.Deallocate(cache, p);
+ allocator.Deallocate(cache, aligned_ptr);
}
}
-void *HwasanReallocate(StackTrace *stack, void *user_old_p, uptr new_size,
- uptr alignment) {
- alignment = Max(alignment, kShadowAlignment);
- new_size = RoundUpTo(new_size, kShadowAlignment);
-
- void *old_p = GetAddressFromPointer(user_old_p);
- Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
- uptr old_size = meta->requested_size;
- uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
- if (new_size <= actually_allocated_size) {
- // We are not reallocating here.
- // FIXME: update stack trace for the allocation?
- meta->requested_size = new_size;
- if (!atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
- return user_old_p;
- if (flags()->retag_in_realloc) {
- HwasanThread *t = GetCurrentThread();
- return (void *)TagMemoryAligned(
- (uptr)old_p, new_size,
- t ? t->GenerateRandomTag() : kFallbackAllocTag);
- }
- if (new_size > old_size) {
- tag_t tag = GetTagFromPointer((uptr)user_old_p);
- TagMemoryAligned((uptr)old_p + old_size, new_size - old_size, tag);
- }
- return user_old_p;
+static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
+ uptr new_size, uptr alignment) {
+ if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));
+
+ void *tagged_ptr_new =
+ HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
+ if (tagged_ptr_old && tagged_ptr_new) {
+ void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
+ internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
+ Min(new_size, static_cast<uptr>(meta->requested_size)));
+ HwasanDeallocate(stack, tagged_ptr_old);
}
- uptr memcpy_size = Min(new_size, old_size);
- void *new_p = HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
- if (new_p) {
- internal_memcpy(new_p, old_p, memcpy_size);
- HwasanDeallocate(stack, old_p);
- }
- return new_p;
+ return tagged_ptr_new;
}
-void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
if (AllocatorMayReturnNull())
return nullptr;
@@ -250,12 +294,18 @@ HwasanChunkView FindHeapChunkByAddress(uptr address) {
return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
-static uptr AllocationSize(const void *user_ptr) {
- const void *p = GetAddressFromPointer(user_ptr);
- if (!p) return 0;
- const void *beg = allocator.GetBlockBegin(p);
- if (beg != p) return 0;
- Metadata *b = (Metadata *)allocator.GetMetaData(p);
+static uptr AllocationSize(const void *tagged_ptr) {
+ const void *untagged_ptr = UntagPtr(tagged_ptr);
+ if (!untagged_ptr) return 0;
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
+ if (b->right_aligned) {
+ if (beg != reinterpret_cast<void *>(RoundDownTo(
+ reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
+ return 0;
+ } else {
+ if (beg != untagged_ptr) return 0;
+ }
return b->requested_size;
}
@@ -270,6 +320,14 @@ void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
if (!ptr)
return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
+
+#if HWASAN_WITH_INTERCEPTORS
+ // A tag of 0 means that this is a system allocator allocation, so we must use
+ // the system allocator to realloc it.
+ if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
+ return REAL(realloc)(ptr, size);
+#endif
+
if (size == 0) {
HwasanDeallocate(stack, ptr);
return nullptr;
@@ -331,6 +389,17 @@ int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
+void hwasan_free(void *ptr, StackTrace *stack) {
+#if HWASAN_WITH_INTERCEPTORS
+ // A tag of 0 means that this is a system allocator allocation, so we must use
+ // the system allocator to free it.
+ if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
+ return REAL(free)(ptr);
+#endif
+
+ return HwasanDeallocate(stack, ptr);
+}
+
} // namespace __hwasan
using namespace __hwasan;
@@ -340,6 +409,15 @@ void __hwasan_enable_allocator_tagging() {
}
void __hwasan_disable_allocator_tagging() {
+#if HWASAN_WITH_INTERCEPTORS
+ // Allocator tagging must be enabled for the system allocator fallback to work
+ // correctly. This means that we can't disable it at runtime if it was enabled
+ // at startup since that might result in our deallocations going to the system
+ // allocator. If tagging was disabled at startup we avoid this problem by
+ // disabling the fallback altogether.
+ CHECK(flags()->disable_allocator_tagging);
+#endif
+
atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}
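
To make the tail-magic scheme above concrete: TaggedSize() rounds the requested size up to the 16-byte granule, malloc fills the slack with tail_magic, and free re-checks it (ReportTailOverwritten fires on a mismatch). A simplified standalone sketch of that check, with the real allocator replaced by a raw buffer:

    #include <cstddef>
    #include <cstring>

    constexpr size_t kShadowAlignment = 16;            // tag granule size
    static unsigned char tail_magic[kShadowAlignment]; // filled once at init

    size_t TaggedSize(size_t size) {                   // round up to granule
      if (!size) size = 1;
      return (size + kShadowAlignment - 1) & ~(kShadowAlignment - 1);
    }

    // True if the bytes between the requested size and the granule-rounded
    // size are intact, i.e. nothing wrote past the requested size.
    bool TailIntact(const unsigned char *block, size_t orig_size) {
      size_t tail = TaggedSize(orig_size) - orig_size;
      return tail == 0 ||
             std::memcmp(block + orig_size, tail_magic, tail) == 0;
    }
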
diff --git a/lib/hwasan/hwasan_allocator.h b/lib/hwasan/hwasan_allocator.h
index d025112e9773..6ab722fa6bef 100644
--- a/lib/hwasan/hwasan_allocator.h
+++ b/lib/hwasan/hwasan_allocator.h
@@ -1,4 +1,4 @@
-//===-- hwasan_allocator.h ----------------------------------------*- C++ -*-===//
+//===-- hwasan_allocator.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,35 +14,73 @@
#ifndef HWASAN_ALLOCATOR_H
#define HWASAN_ALLOCATOR_H
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
+#include "hwasan_poisoning.h"
+
+#if !defined(__aarch64__) && !defined(__x86_64__)
+#error Unsupported platform
+#endif
+
+#if HWASAN_WITH_INTERCEPTORS
+DECLARE_REAL(void *, realloc, void *ptr, uptr size)
+DECLARE_REAL(void, free, void *ptr)
+#endif
namespace __hwasan {
-struct HwasanThreadLocalMallocStorage {
- uptr quarantine_cache[16];
- // Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
- ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
- void CommitBack();
+struct Metadata {
+ u32 requested_size : 31; // sizes are < 2G.
+ u32 right_aligned : 1;
+ u32 alloc_context_id;
+};
- private:
- // These objects are allocated via mmap() and are zero-initialized.
- HwasanThreadLocalMallocStorage() {}
+struct HwasanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // It can return as user-requested mmap() or another thread stack.
+ // Make it accessible with zero-tagged pointer.
+ TagMemory(p, size, 0);
+ }
};
-struct Metadata;
+static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+
+struct AP64 {
+ static const uptr kSpaceBeg = ~0ULL;
+ static const uptr kSpaceSize = 0x2000000000ULL;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef HwasanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<HwasanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
class HwasanChunkView {
public:
HwasanChunkView() : block_(0), metadata_(nullptr) {}
HwasanChunkView(uptr block, Metadata *metadata)
: block_(block), metadata_(metadata) {}
- bool IsValid() const; // Checks if it points to a valid allocated chunk
bool IsAllocated() const; // Checks if the memory is currently allocated
uptr Beg() const; // First byte of user memory
uptr End() const; // Last byte of user memory
uptr UsedSize() const; // Size requested by the user
+ uptr ActualSize() const; // Size allocated by the allocator.
u32 GetAllocStackId() const;
- u32 GetFreeStackId() const;
+ bool FromSmallHeap() const;
private:
uptr block_;
Metadata *const metadata_;
@@ -50,6 +88,21 @@ class HwasanChunkView {
HwasanChunkView FindHeapChunkByAddress(uptr address);
+// Information about one (de)allocation that happened in the past.
+// These are recorded in a thread-local ring buffer.
+// TODO: this is currently 24 bytes (20 bytes + alignment).
+// Compress it to 16 bytes or extend it to be more useful.
+struct HeapAllocationRecord {
+ uptr tagged_addr;
+ u32 alloc_context_id;
+ u32 free_context_id;
+ u32 requested_size;
+};
+
+typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
+
+void GetAllocatorStats(AllocatorStatCounters s);
+
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H
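
The HeapAllocationsRingBuffer declared above keeps the last N (de)allocations per thread so a report on a freed address can still name its allocation and deallocation stacks. A minimal illustration of the idea (this is not the sanitizer_common RingBuffer, just a hypothetical stand-in):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <optional>

    struct HeapAllocationRecord {
      uintptr_t tagged_addr;
      uint32_t alloc_context_id;
      uint32_t free_context_id;
      uint32_t requested_size;
    };

    template <size_t N>
    class SimpleRing {
      std::array<HeapAllocationRecord, N> buf_{};
      size_t next_ = 0, size_ = 0;

     public:
      void push(const HeapAllocationRecord &r) {
        buf_[next_] = r;             // overwrite the oldest entry when full
        next_ = (next_ + 1) % N;
        if (size_ < N) ++size_;
      }
      // Most recent record whose (untagged) range covers addr, if any.
      std::optional<HeapAllocationRecord> find(uintptr_t addr) const {
        for (size_t i = 0; i < size_; ++i) {
          const HeapAllocationRecord &r = buf_[(next_ + N - 1 - i) % N];
          uintptr_t beg = r.tagged_addr & ((1ULL << 56) - 1); // strip tag byte
          if (addr >= beg && addr < beg + r.requested_size) return r;
        }
        return std::nullopt;
      }
    };
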
diff --git a/lib/hwasan/hwasan_checks.h b/lib/hwasan/hwasan_checks.h
new file mode 100644
index 000000000000..688b5e2bed82
--- /dev/null
+++ b/lib/hwasan/hwasan_checks.h
@@ -0,0 +1,80 @@
+//===-- hwasan_checks.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_CHECKS_H
+#define HWASAN_CHECKS_H
+
+#include "hwasan_mapping.h"
+
+namespace __hwasan {
+template <unsigned X>
+__attribute__((always_inline)) static void SigTrap(uptr p) {
+#if defined(__aarch64__)
+ (void)p;
+ // 0x900 is added so as not to interfere with the kernel's use of lower
+ // brk immediate values.
+ // FIXME: Add a constraint to put the pointer into x0, the same as x86 branch.
+ asm("brk %0\n\t" ::"n"(0x900 + X));
+#elif defined(__x86_64__)
+ // INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
+ // total. The pointer is passed via rdi.
+ // 0x40 is added as a safeguard, to help distinguish our trap from others and
+ // to avoid 0 offsets in the command (otherwise it'll be reduced to a
+ // different nop command, the three bytes one).
+ asm volatile(
+ "int3\n"
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "D"(p));
+#else
+ // FIXME: not always sigill.
+ __builtin_trap();
+#endif
+ // __builtin_unreachable();
+}
+
+enum class ErrorAction { Abort, Recover };
+enum class AccessType { Load, Store };
+
+template <ErrorAction EA, AccessType AT, unsigned LogSize>
+__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
+ tag_t ptr_tag = GetTagFromPointer(p);
+ uptr ptr_raw = p & ~kAddressTagMask;
+ tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
+ if (UNLIKELY(ptr_tag != mem_tag)) {
+ SigTrap<0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + LogSize>(p);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+}
+
+template <ErrorAction EA, AccessType AT>
+__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
+ uptr sz) {
+ if (sz == 0)
+ return;
+ tag_t ptr_tag = GetTagFromPointer(p);
+ uptr ptr_raw = p & ~kAddressTagMask;
+ tag_t *shadow_first = (tag_t *)MemToShadow(ptr_raw);
+ tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz - 1);
+ for (tag_t *t = shadow_first; t <= shadow_last; ++t)
+ if (UNLIKELY(ptr_tag != *t)) {
+ SigTrap<0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + 0xf>(p);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+}
+} // end namespace __hwasan
+
+#endif // HWASAN_CHECKS_H
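
The trap immediates used by SigTrap above pack the access description into one byte on top of the 0x900 (aarch64 brk) or 0x40 (x86_64) base: 0x20 marks a recoverable check, 0x10 a store, and the low nibble is log2 of the access size, with 0xf reserved for variable-sized accesses. A sketch of decoding that byte, under the assumption that the handler sees the immediate with the base already stripped:

    #include <cstdio>

    struct AccessInfo {
      bool recover;       // 0x20 bit: report and continue
      bool is_store;      // 0x10 bit: store vs. load
      bool sized;         // low nibble == 0xf: size passed separately
      unsigned log_size;  // otherwise log2 of the access size
    };

    AccessInfo DecodeTrapImmediate(unsigned imm) {  // base already removed
      AccessInfo info;
      info.recover = imm & 0x20;
      info.is_store = imm & 0x10;
      unsigned nibble = imm & 0xf;
      info.sized = (nibble == 0xf);
      info.log_size = info.sized ? 0 : nibble;
      return info;
    }

    int main() {
      // 0x14 = store of 2**4 = 16 bytes, abort on error.
      AccessInfo a = DecodeTrapImmediate(0x14);
      std::printf("store=%d recover=%d size=%u\n", a.is_store, a.recover,
                  1u << a.log_size);
    }
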
diff --git a/lib/hwasan/hwasan_dynamic_shadow.cc b/lib/hwasan/hwasan_dynamic_shadow.cc
index 17338003aa65..87670f508283 100644
--- a/lib/hwasan/hwasan_dynamic_shadow.cc
+++ b/lib/hwasan/hwasan_dynamic_shadow.cc
@@ -13,6 +13,7 @@
///
//===----------------------------------------------------------------------===//
+#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -35,12 +36,16 @@ static void UnmapFromTo(uptr from, uptr to) {
}
}
-// Returns an address aligned to 8 pages, such that one page on the left and
-// shadow_size_bytes bytes on the right of it are mapped r/o.
+// Returns an address aligned to kShadowBaseAlignment, such that
+// 2**kShadowBaseAlignment bytes on the left and shadow_size_bytes bytes on
+// the right of it are mapped as no-access.
static uptr MapDynamicShadow(uptr shadow_size_bytes) {
const uptr granularity = GetMmapGranularity();
- const uptr alignment = granularity * SHADOW_GRANULARITY;
- const uptr left_padding = granularity;
+ const uptr min_alignment = granularity << kShadowScale;
+ const uptr alignment = 1ULL << kShadowBaseAlignment;
+ CHECK_GE(alignment, min_alignment);
+
+ const uptr left_padding = 1ULL << kShadowBaseAlignment;
const uptr shadow_size =
RoundUpTo(shadow_size_bytes, granularity);
const uptr map_size = shadow_size + left_padding + alignment;
@@ -58,8 +63,7 @@ static uptr MapDynamicShadow(uptr shadow_size_bytes) {
} // namespace __hwasan
-#if HWASAN_PREMAP_SHADOW
-
+#if SANITIZER_ANDROID
extern "C" {
INTERFACE_ATTRIBUTE void __hwasan_shadow();
@@ -117,16 +121,22 @@ void __hwasan_shadow();
} // extern "C"
-#endif // HWASAN_PREMAP_SHADOW
-
namespace __hwasan {
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
-#if HWASAN_PREMAP_SHADOW
if (IsPremapShadowAvailable())
return FindPremappedShadowStart(shadow_size_bytes);
-#endif
return MapDynamicShadow(shadow_size_bytes);
}
} // namespace __hwasan
+#else
+namespace __hwasan {
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+ return MapDynamicShadow(shadow_size_bytes);
+}
+
+} // namespace __hwasan
+#
+#endif // SANITIZER_ANDROID
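
For scale, with kShadowBaseAlignment = 32 both the alignment and the left padding above are 1ULL << 32 = 4 GiB, so the initial reservation is shadow_size plus 8 GiB of no-access address space before the aligned shadow window is carved out. A worked computation (constants assumed from this import; the address-space size is only an example):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t granularity = 4096;         // typical mmap granularity
      const unsigned kShadowScale = 4;           // 16-byte tag granules
      const unsigned kShadowBaseAlignment = 32;

      const uint64_t min_alignment = granularity << kShadowScale; // 64 KiB
      const uint64_t alignment = 1ULL << kShadowBaseAlignment;    // 4 GiB
      const uint64_t left_padding = 1ULL << kShadowBaseAlignment; // 4 GiB

      // Shadow for e.g. a 1 TiB address space: 2**40 / 16 = 64 GiB.
      const uint64_t shadow_size_bytes = 1ULL << 36;
      const uint64_t map_size = shadow_size_bytes + left_padding + alignment;
      std::printf("reserve %llu GiB (min alignment %llu KiB)\n",
                  (unsigned long long)(map_size >> 30),
                  (unsigned long long)(min_alignment >> 10));
    }
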
diff --git a/lib/hwasan/hwasan_flags.h b/lib/hwasan/hwasan_flags.h
index 16d60c4d8ba5..492d5bb98c27 100644
--- a/lib/hwasan/hwasan_flags.h
+++ b/lib/hwasan/hwasan_flags.h
@@ -1,4 +1,4 @@
-//===-- hwasan_flags.h --------------------------------------------*- C++ -*-===//
+//===-- hwasan_flags.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/hwasan/hwasan_flags.inc b/lib/hwasan/hwasan_flags.inc
index c45781168d6c..b450ab9503f9 100644
--- a/lib/hwasan/hwasan_flags.inc
+++ b/lib/hwasan/hwasan_flags.inc
@@ -17,9 +17,10 @@
// HWASAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
+HWASAN_FLAG(bool, verbose_threads, false,
+ "inform on thread creation/destruction")
HWASAN_FLAG(bool, tag_in_malloc, true, "")
HWASAN_FLAG(bool, tag_in_free, true, "")
-HWASAN_FLAG(bool, retag_in_realloc, true, "")
HWASAN_FLAG(bool, print_stats, false, "")
HWASAN_FLAG(bool, halt_on_error, true, "")
HWASAN_FLAG(bool, atexit, false, "")
@@ -31,3 +32,57 @@ HWASAN_FLAG(bool, disable_allocator_tagging, false, "")
// If false, use simple increment of a thread local counter to generate new
// tags.
HWASAN_FLAG(bool, random_tags, true, "")
+
+HWASAN_FLAG(
+ int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
+ "HWASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.")
+
+// Rules for malloc alignment on aarch64:
+// * If the size is 16-aligned, then malloc should return 16-aligned memory.
+// * Otherwise, malloc should return 8-aligned memory.
+// So,
+// * If the size is 16-aligned, we don't need to do anything.
+// * Otherwise we don't have to obey 16-alignment, just the 8-alignment.
+// * We may want to break the 8-alignment rule to catch more buffer overflows
+// but this will break valid code in some rare cases, like this:
+// struct Foo {
+// // accessed via atomic instructions that require 8-alignment.
+// std::atomic<int64_t> atomic_stuff;
+// ...
+// char vla[1]; // the actual size of vla could be anything.
+// }
+// Which means that the safe values for malloc_align_right are 0, 8, 9,
+// and the values 1 and 2 may require changes in otherwise valid code.
+
+HWASAN_FLAG(
+ int, malloc_align_right, 0, // off by default
+ "HWASan allocator flag. "
+ "0 (default): allocations are always aligned left to 16-byte boundary; "
+ "1: allocations are sometimes aligned right to 1-byte boundary (risky); "
+ "2: allocations are always aligned right to 1-byte boundary (risky); "
+ "8: allocations are sometimes aligned right to 8-byte boundary; "
+ "9: allocations are always aligned right to 8-byte boundary."
+ )
+HWASAN_FLAG(bool, free_checks_tail_magic, 1,
+ "If set, free() will check the magic values "
+ "to the right of the allocated object "
+ "if the allocation size is not a divident of the granule size")
+HWASAN_FLAG(
+ int, max_free_fill_size, 0,
+ "HWASan allocator flag. max_free_fill_size is the maximal amount of "
+ "bytes that will be filled with free_fill_byte during free.")
+HWASAN_FLAG(int, malloc_fill_byte, 0xbe,
+ "Value used to fill the newly allocated memory.")
+HWASAN_FLAG(int, free_fill_byte, 0x55,
+ "Value used to fill deallocated memory.")
+HWASAN_FLAG(int, heap_history_size, 1023,
+ "The number of heap (de)allocations remembered per thread. "
+ "Affects the quality of heap-related reports, but not the ability "
+ "to find bugs.")
+HWASAN_FLAG(bool, export_memory_stats, true,
+ "Export up-to-date memory stats through /proc")
+HWASAN_FLAG(int, stack_history_size, 1024,
+ "The number of stack frames remembered per thread. "
+ "Affects the quality of stack-related reports, but not the ability "
+ "to find bugs.")
diff --git a/lib/hwasan/hwasan_interceptors.cc b/lib/hwasan/hwasan_interceptors.cc
index 66aab95db56f..fb0dcb8905c0 100644
--- a/lib/hwasan/hwasan_interceptors.cc
+++ b/lib/hwasan/hwasan_interceptors.cc
@@ -1,4 +1,4 @@
-//===-- hwasan_interceptors.cc ----------------------------------------------===//
+//===-- hwasan_interceptors.cc --------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,6 +17,7 @@
#include "interception/interception.h"
#include "hwasan.h"
+#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_poisoning.h"
@@ -44,24 +45,19 @@ using __sanitizer::atomic_load;
using __sanitizer::atomic_store;
using __sanitizer::atomic_uintptr_t;
-DECLARE_REAL(SIZE_T, strlen, const char *s)
-DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen)
-DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
-DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
-
bool IsInInterceptorScope() {
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
return t && t->InInterceptorScope();
}
struct InterceptorScope {
InterceptorScope() {
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
if (t)
t->EnterInterceptorScope();
}
~InterceptorScope() {
- HwasanThread *t = GetCurrentThread();
+ Thread *t = GetCurrentThread();
if (t)
t->LeaveInterceptorScope();
}
@@ -92,66 +88,24 @@ static void *AllocateFromLocalPool(uptr size_in_bytes) {
} while (0)
-
-#define HWASAN_READ_RANGE(ctx, offset, size) \
- CHECK_UNPOISONED(offset, size)
-#define HWASAN_WRITE_RANGE(ctx, offset, size) \
- CHECK_UNPOISONED(offset, size)
-
-
-
-// Check that [x, x+n) range is unpoisoned.
-#define CHECK_UNPOISONED_0(x, n) \
- do { \
- sptr __offset = __hwasan_test_shadow(x, n); \
- if (__hwasan::IsInSymbolizer()) break; \
- if (__offset >= 0) { \
- GET_CALLER_PC_BP_SP; \
- (void)sp; \
- ReportInvalidAccessInsideAddressRange(__func__, x, n, __offset); \
- __hwasan::PrintWarning(pc, bp); \
- if (__hwasan::flags()->halt_on_error) { \
- Printf("Exiting\n"); \
- Die(); \
- } \
- } \
- } while (0)
-
-// Check that [x, x+n) range is unpoisoned unless we are in a nested
-// interceptor.
-#define CHECK_UNPOISONED(x, n) \
- do { \
- if (!IsInInterceptorScope()) CHECK_UNPOISONED_0(x, n); \
- } while (0)
-
-#define CHECK_UNPOISONED_STRING_OF_LEN(x, len, n) \
- CHECK_UNPOISONED((x), \
- common_flags()->strict_string_checks ? (len) + 1 : (n) )
-
-
-INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
+int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
CHECK_NE(memptr, 0);
int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
return res;
}
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) {
+void * __sanitizer_memalign(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_memalign(alignment, size, &stack);
}
-#define HWASAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
-#else
-#define HWASAN_MAYBE_INTERCEPT_MEMALIGN
-#endif
-INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
+void * __sanitizer_aligned_alloc(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_aligned_alloc(alignment, size, &stack);
}
-INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
+void * __sanitizer___libc_memalign(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
void *ptr = hwasan_memalign(alignment, size, &stack);
if (ptr)
@@ -159,80 +113,47 @@ INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
return ptr;
}
-INTERCEPTOR(void *, valloc, SIZE_T size) {
+void * __sanitizer_valloc(uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_valloc(size, &stack);
}
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR(void *, pvalloc, SIZE_T size) {
+void * __sanitizer_pvalloc(uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_pvalloc(size, &stack);
}
-#define HWASAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
-#else
-#define HWASAN_MAYBE_INTERCEPT_PVALLOC
-#endif
-INTERCEPTOR(void, free, void *ptr) {
+void __sanitizer_free(void *ptr) {
GET_MALLOC_STACK_TRACE;
if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
- HwasanDeallocate(&stack, ptr);
+ hwasan_free(ptr, &stack);
}
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR(void, cfree, void *ptr) {
+void __sanitizer_cfree(void *ptr) {
GET_MALLOC_STACK_TRACE;
if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
- HwasanDeallocate(&stack, ptr);
+ hwasan_free(ptr, &stack);
}
-#define HWASAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
-#else
-#define HWASAN_MAYBE_INTERCEPT_CFREE
-#endif
-INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+uptr __sanitizer_malloc_usable_size(const void *ptr) {
return __sanitizer_get_allocated_size(ptr);
}
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-// This function actually returns a struct by value, but we can't unpoison a
-// temporary! The following is equivalent on all supported platforms but
-// aarch64 (which uses a different register for sret value). We have a test
-// to confirm that.
-INTERCEPTOR(void, mallinfo, __sanitizer_mallinfo *sret) {
-#ifdef __aarch64__
- uptr r8;
- asm volatile("mov %0,x8" : "=r" (r8));
- sret = reinterpret_cast<__sanitizer_mallinfo*>(r8);
-#endif
- REAL(memset)(sret, 0, sizeof(*sret));
+struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
+ __sanitizer_struct_mallinfo sret;
+ internal_memset(&sret, 0, sizeof(sret));
+ return sret;
}
-#define HWASAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
-#else
-#define HWASAN_MAYBE_INTERCEPT_MALLINFO
-#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR(int, mallopt, int cmd, int value) {
- return -1;
+int __sanitizer_mallopt(int cmd, int value) {
+ return 0;
}
-#define HWASAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
-#else
-#define HWASAN_MAYBE_INTERCEPT_MALLOPT
-#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR(void, malloc_stats, void) {
+void __sanitizer_malloc_stats(void) {
// FIXME: implement, but don't call REAL(malloc_stats)!
}
-#define HWASAN_MAYBE_INTERCEPT_MALLOC_STATS INTERCEPT_FUNCTION(malloc_stats)
-#else
-#define HWASAN_MAYBE_INTERCEPT_MALLOC_STATS
-#endif
-
-INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
+void * __sanitizer_calloc(uptr nmemb, uptr size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!hwasan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
@@ -240,7 +161,7 @@ INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
return hwasan_calloc(nmemb, size, &stack);
}
-INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
+void * __sanitizer_realloc(void *ptr, uptr size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
@@ -258,7 +179,7 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
return hwasan_realloc(ptr, size, &stack);
}
-INTERCEPTOR(void *, malloc, SIZE_T size) {
+void * __sanitizer_malloc(uptr size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!hwasan_init_is_running))
ENSURE_HWASAN_INITED();
@@ -268,48 +189,44 @@ INTERCEPTOR(void *, malloc, SIZE_T size) {
return hwasan_malloc(size, &stack);
}
-template <class Mmap>
-static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T sz, int prot,
- int flags, int fd, OFF64_T off) {
- if (addr && !MEM_IS_APP(addr)) {
- if (flags & map_fixed) {
- errno = errno_EINVAL;
- return (void *)-1;
- } else {
- addr = nullptr;
- }
- }
- return real_mmap(addr, sz, prot, flags, fd, off);
-}
-
-extern "C" int pthread_attr_init(void *attr);
-extern "C" int pthread_attr_destroy(void *attr);
-
-static void *HwasanThreadStartFunc(void *arg) {
- HwasanThread *t = (HwasanThread *)arg;
- SetCurrentThread(t);
- return t->ThreadStart();
-}
-
-INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
- void * param) {
- ENSURE_HWASAN_INITED(); // for GetTlsSize()
- __sanitizer_pthread_attr_t myattr;
- if (!attr) {
- pthread_attr_init(&myattr);
- attr = &myattr;
- }
+#if HWASAN_WITH_INTERCEPTORS
+#define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
+ ALIAS("__sanitizer_" #FN); \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS("__sanitizer_" #FN)
+
+INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
+ SIZE_T size);
+INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, free, void *ptr);
+INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
+INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
- AdjustStackSize(attr);
-
- HwasanThread *t = HwasanThread::Create(callback, param);
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, cfree, void *ptr);
+INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
+INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
+INTERCEPTOR_ALIAS(void, malloc_stats, void);
+#endif
+#endif // HWASAN_WITH_INTERCEPTORS
- int res = REAL(pthread_create)(th, attr, HwasanThreadStartFunc, t);
- if (attr == &myattr)
- pthread_attr_destroy(&myattr);
+#if HWASAN_WITH_INTERCEPTORS && !defined(__aarch64__)
+INTERCEPTOR(int, pthread_create, void *th, void *attr,
+ void *(*callback)(void *), void *param) {
+ ScopedTaggingDisabler disabler;
+ int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr),
+ callback, param);
return res;
}
+#endif
static void BeforeFork() {
StackDepotLockAll();
@@ -341,137 +258,21 @@ int OnExit() {
} // namespace __hwasan
-// A version of CHECK_UNPOISONED using a saved scope value. Used in common
-// interceptors.
-#define CHECK_UNPOISONED_CTX(ctx, x, n) \
- do { \
- if (!((HwasanInterceptorContext *)ctx)->in_interceptor_scope) \
- CHECK_UNPOISONED_0(x, n); \
- } while (0)
-
-#define HWASAN_INTERCEPT_FUNC(name) \
- do { \
- if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
- VReport(1, "HWAddressSanitizer: failed to intercept '" #name "'\n"); \
- } while (0)
-
-#define HWASAN_INTERCEPT_FUNC_VER(name, ver) \
- do { \
- if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \
- VReport( \
- 1, "HWAddressSanitizer: failed to intercept '" #name "@@" #ver "'\n"); \
- } while (0)
-
-#define COMMON_INTERCEPT_FUNCTION(name) HWASAN_INTERCEPT_FUNC(name)
-#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
- HWASAN_INTERCEPT_FUNC_VER(name, ver)
-#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
- CHECK_UNPOISONED_CTX(ctx, ptr, size)
-#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
- CHECK_UNPOISONED_CTX(ctx, ptr, size)
-#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ptr, size) \
- HWASAN_WRITE_RANGE(ctx, ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- if (hwasan_init_is_running) return REAL(func)(__VA_ARGS__); \
- ENSURE_HWASAN_INITED(); \
- HwasanInterceptorContext hwasan_ctx = {IsInInterceptorScope()}; \
- ctx = (void *)&hwasan_ctx; \
- (void)ctx; \
- InterceptorScope interceptor_scope;
-#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
- do { \
- } while (false)
-#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
- do { \
- } while (false)
-#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
- do { \
- } while (false)
-#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
- do { \
- } while (false)
-#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
- do { \
- } while (false) // FIXME
-#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
- do { \
- } while (false) // FIXME
-#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
-#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-
-#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
- if (HwasanThread *t = GetCurrentThread()) { \
- *begin = t->tls_begin(); \
- *end = t->tls_end(); \
- } else { \
- *begin = *end = 0; \
- }
-
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
- { \
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
- if (common_flags()->intercept_intrin && \
- MEM_IS_APP(GetAddressFromPointer(dst))) \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- return REAL(memset)(dst, v, size); \
- }
-
-#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, fd, \
- offset) \
- do { \
- return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd, \
- offset); \
- } while (false)
-
-#include "sanitizer_common/sanitizer_platform_interceptors.h"
-#include "sanitizer_common/sanitizer_common_interceptors.inc"
-#include "sanitizer_common/sanitizer_signal_interceptors.inc"
-
-#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s)
-#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
- do { \
- (void)(p); \
- (void)(s); \
- } while (false)
-#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
- do { \
- (void)(p); \
- (void)(s); \
- } while (false)
-#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
- do { \
- (void)(p); \
- (void)(s); \
- } while (false)
-#include "sanitizer_common/sanitizer_common_syscalls.inc"
-#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
-
-
-
namespace __hwasan {
void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
- InitializeCommonInterceptors();
- InitializeSignalInterceptors();
-
- INTERCEPT_FUNCTION(posix_memalign);
- HWASAN_MAYBE_INTERCEPT_MEMALIGN;
- INTERCEPT_FUNCTION(__libc_memalign);
- INTERCEPT_FUNCTION(valloc);
- HWASAN_MAYBE_INTERCEPT_PVALLOC;
- INTERCEPT_FUNCTION(malloc);
- INTERCEPT_FUNCTION(calloc);
+
+ INTERCEPT_FUNCTION(fork);
+
+#if HWASAN_WITH_INTERCEPTORS
+#if !defined(__aarch64__)
+ INTERCEPT_FUNCTION(pthread_create);
+#endif
INTERCEPT_FUNCTION(realloc);
INTERCEPT_FUNCTION(free);
- HWASAN_MAYBE_INTERCEPT_CFREE;
- INTERCEPT_FUNCTION(malloc_usable_size);
- HWASAN_MAYBE_INTERCEPT_MALLINFO;
- HWASAN_MAYBE_INTERCEPT_MALLOPT;
- HWASAN_MAYBE_INTERCEPT_MALLOC_STATS;
- INTERCEPT_FUNCTION(pthread_create);
- INTERCEPT_FUNCTION(fork);
+#endif
inited = 1;
}
diff --git a/lib/hwasan/hwasan_interface_internal.h b/lib/hwasan/hwasan_interface_internal.h
index b4e5c80904df..d3b2087d03c6 100644
--- a/lib/hwasan/hwasan_interface_internal.h
+++ b/lib/hwasan/hwasan_interface_internal.h
@@ -1,4 +1,4 @@
-//===-- hwasan_interface_internal.h -------------------------------*- C++ -*-===//
+//===-- hwasan_interface_internal.h -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,10 +16,14 @@
#define HWASAN_INTERFACE_INTERNAL_H
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_shadow_init();
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_init();
using __sanitizer::uptr;
@@ -33,6 +37,9 @@ using __sanitizer::u16;
using __sanitizer::u8;
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_init_frames(uptr, uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __hwasan_shadow_memory_dynamic_address;
SANITIZER_INTERFACE_ATTRIBUTE
@@ -91,6 +98,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
SANITIZER_INTERFACE_ATTRIBUTE
+uptr __hwasan_tag_pointer(uptr p, u8 tag);
+
+SANITIZER_INTERFACE_ATTRIBUTE
u8 __hwasan_generate_tag();
// Returns the offset of the first tag mismatch or -1 if the whole range is
@@ -105,6 +115,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_print_shadow(const void *x, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_handle_longjmp(const void *sp_dst);
+
+SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -128,6 +141,66 @@ void __hwasan_enable_allocator_tagging();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_disable_allocator_tagging();
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_thread_enter();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_thread_exit();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_print_memory_usage();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_memalign(uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_aligned_alloc(uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer___libc_memalign(uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_valloc(uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_pvalloc(uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_free(void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_cfree(void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_malloc_usable_size(const void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+__hwasan::__sanitizer_struct_mallinfo __sanitizer_mallinfo();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_mallopt(int cmd, int value);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_malloc_stats(void);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_calloc(uptr nmemb, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_realloc(void *ptr, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_malloc(uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memcpy(void *dst, const void *src, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memset(void *s, int c, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memmove(void *dest, const void *src, uptr n);
} // extern "C"
#endif // HWASAN_INTERFACE_INTERNAL_H
diff --git a/lib/hwasan/hwasan_linux.cc b/lib/hwasan/hwasan_linux.cc
index 5ab98dca594d..5b0a8b4ac98f 100644
--- a/lib/hwasan/hwasan_linux.cc
+++ b/lib/hwasan/hwasan_linux.cc
@@ -22,7 +22,9 @@
#include "hwasan_mapping.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include <dlfcn.h>
#include <elf.h>
#include <link.h>
#include <pthread.h>
@@ -37,6 +39,11 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#if HWASAN_WITH_INTERCEPTORS && !SANITIZER_ANDROID
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL uptr __hwasan_tls;
+#endif
+
namespace __hwasan {
static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
@@ -51,8 +58,6 @@ static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
size);
Abort();
}
- if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size);
- if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
}
static void ProtectGap(uptr addr, uptr size) {
@@ -103,55 +108,31 @@ static void PrintAddressSpaceLayout() {
else
CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
- if (SHADOW_OFFSET) {
- if (kLowShadowEnd + 1 < kHighShadowStart)
- PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
- else
- CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
- PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
- if (kLowMemEnd + 1 < kLowShadowStart)
- PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
- else
- CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
- PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
- CHECK_EQ(0, kLowMemStart);
- } else {
- if (kLowMemEnd + 1 < kHighShadowStart)
- PrintRange(kLowMemEnd + 1, kHighShadowStart - 1, "ShadowGap");
- else
- CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
- PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
- CHECK_EQ(kLowShadowEnd + 1, kLowMemStart);
- PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
- PrintRange(0, kLowShadowStart - 1, "ShadowGap");
- }
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
+ PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
+ PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
+ CHECK_EQ(0, kLowMemStart);
}
static uptr GetHighMemEnd() {
// HighMem covers the upper part of the address space.
uptr max_address = GetMaxUserVirtualAddress();
- if (SHADOW_OFFSET)
- // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
- // properly aligned:
- max_address |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+ // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
+ // properly aligned:
+ max_address |= (GetMmapGranularity() << kShadowScale) - 1;
return max_address;
}
static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
- // Set the shadow memory address to uninitialized.
- __hwasan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
- uptr shadow_start = SHADOW_OFFSET;
- // Detect if a dynamic shadow address must be used and find the available
- // location when necessary. When dynamic address is used, the macro
- // kLowShadowBeg expands to __hwasan_shadow_memory_dynamic_address which
- // was just set to kDefaultShadowSentinel.
- if (shadow_start == kDefaultShadowSentinel) {
- __hwasan_shadow_memory_dynamic_address = 0;
- CHECK_EQ(0, SHADOW_OFFSET);
- shadow_start = FindDynamicShadowStart(shadow_size_bytes);
- }
- // Update the shadow memory address (potentially) used by instrumentation.
- __hwasan_shadow_memory_dynamic_address = shadow_start;
+ __hwasan_shadow_memory_dynamic_address =
+ FindDynamicShadowStart(shadow_size_bytes);
}
bool InitShadow() {
@@ -159,29 +140,23 @@ bool InitShadow() {
kHighMemEnd = GetHighMemEnd();
// Determine shadow memory base offset.
- InitializeShadowBaseAddress(MEM_TO_SHADOW_SIZE(kHighMemEnd));
+ InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));
// Place the low memory first.
- if (SHADOW_OFFSET) {
- kLowMemEnd = SHADOW_OFFSET - 1;
- kLowMemStart = 0;
- } else {
- // LowMem covers as much of the first 4GB as possible.
- kLowMemEnd = (1UL << 32) - 1;
- kLowMemStart = MEM_TO_SHADOW(kLowMemEnd) + 1;
- }
+ kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
+ kLowMemStart = 0;
// Define the low shadow based on the already placed low memory.
- kLowShadowEnd = MEM_TO_SHADOW(kLowMemEnd);
- kLowShadowStart = SHADOW_OFFSET ? SHADOW_OFFSET : MEM_TO_SHADOW(kLowMemStart);
+ kLowShadowEnd = MemToShadow(kLowMemEnd);
+ kLowShadowStart = __hwasan_shadow_memory_dynamic_address;
// High shadow takes whatever memory is left up there (making sure it is not
// interfering with low memory in the fixed case).
- kHighShadowEnd = MEM_TO_SHADOW(kHighMemEnd);
- kHighShadowStart = Max(kLowMemEnd, MEM_TO_SHADOW(kHighShadowEnd)) + 1;
+ kHighShadowEnd = MemToShadow(kHighMemEnd);
+ kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;
// High memory starts where allocated shadow allows.
- kHighMemStart = SHADOW_TO_MEM(kHighShadowStart);
+ kHighMemStart = ShadowToMem(kHighShadowStart);
// Check the sanity of the defined memory ranges (there might be gaps).
CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
@@ -190,10 +165,7 @@ bool InitShadow() {
CHECK_GT(kHighShadowStart, kLowMemEnd);
CHECK_GT(kLowMemEnd, kLowMemStart);
CHECK_GT(kLowShadowEnd, kLowShadowStart);
- if (SHADOW_OFFSET)
- CHECK_GT(kLowShadowStart, kLowMemEnd);
- else
- CHECK_GT(kLowMemEnd, kLowShadowStart);
+ CHECK_GT(kLowShadowStart, kLowMemEnd);
if (Verbosity())
PrintAddressSpaceLayout();
@@ -204,21 +176,43 @@ bool InitShadow() {
// Protect all the gaps.
ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
- if (SHADOW_OFFSET) {
- if (kLowMemEnd + 1 < kLowShadowStart)
- ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
- if (kLowShadowEnd + 1 < kHighShadowStart)
- ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
- } else {
- if (kLowMemEnd + 1 < kHighShadowStart)
- ProtectGap(kLowMemEnd + 1, kHighShadowStart - kLowMemEnd - 1);
- }
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
if (kHighShadowEnd + 1 < kHighMemStart)
ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);
return true;
}
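+// Reserve the (2**kShadowBaseAlignment)-sized region directly below the
+// shadow base for the per-thread stack ring buffers, as required by the
+// ABI contract described in hwasan_thread_list.h; a guard page separates
+// the ring buffers from the shadow itself.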
+void InitThreads() {
+ CHECK(__hwasan_shadow_memory_dynamic_address);
+ uptr guard_page_size = GetMmapGranularity();
+ uptr thread_space_start =
+ __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
+ uptr thread_space_end =
+ __hwasan_shadow_memory_dynamic_address - guard_page_size;
+ ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
+ "hwasan threads");
+ ProtectGap(thread_space_end,
+ __hwasan_shadow_memory_dynamic_address - thread_space_end);
+ InitThreadList(thread_space_start, thread_space_end - thread_space_start);
+}
+
+static void MadviseShadowRegion(uptr beg, uptr end) {
+ uptr size = end - beg + 1;
+ if (common_flags()->no_huge_pages_for_shadow)
+ NoHugePagesInRegion(beg, size);
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
+}
+
+void MadviseShadow() {
+ MadviseShadowRegion(kLowShadowStart, kLowShadowEnd);
+ MadviseShadowRegion(kHighShadowStart, kHighShadowEnd);
+}
+
bool MemIsApp(uptr p) {
CHECK(GetTagFromPointer(p) == 0);
return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
@@ -240,37 +234,81 @@ void InstallAtExitHandler() {
// ---------------------- TSD ---------------- {{{1
+extern "C" void __hwasan_thread_enter() {
+ hwasanThreadList().CreateCurrentThread();
+}
+
+extern "C" void __hwasan_thread_exit() {
+ Thread *t = GetCurrentThread();
+  // Make sure that the signal handler cannot see a stale current thread pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ if (t)
+ hwasanThreadList().ReleaseThread(t);
+}
+
+#if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
-void HwasanTSDInit(void (*destructor)(void *tsd)) {
+void HwasanTSDThreadInit() {
+ if (tsd_key_inited)
+ CHECK_EQ(0, pthread_setspecific(tsd_key,
+ (void *)GetPthreadDestructorIterations()));
+}
+
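+// The TSD slot stores a countdown rather than a thread pointer: the
+// destructor re-arms itself until the remaining pthread destructor
+// iterations are exhausted, and only then tears the thread down.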
+void HwasanTSDDtor(void *tsd) {
+ uptr iterations = (uptr)tsd;
+ if (iterations > 1) {
+ CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
+ return;
+ }
+ __hwasan_thread_exit();
+}
+
+void HwasanTSDInit() {
CHECK(!tsd_key_inited);
tsd_key_inited = true;
- CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
+ CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
+#else
+void HwasanTSDInit() {}
+void HwasanTSDThreadInit() {}
+#endif
-HwasanThread *GetCurrentThread() {
- return (HwasanThread*)pthread_getspecific(tsd_key);
+#if SANITIZER_ANDROID
+uptr *GetCurrentThreadLongPtr() {
+ return (uptr *)get_android_tls_ptr();
}
-
-void SetCurrentThread(HwasanThread *t) {
- // Make sure that HwasanTSDDtor gets called at the end.
- CHECK(tsd_key_inited);
- // Make sure we do not reset the current HwasanThread.
- CHECK_EQ(0, pthread_getspecific(tsd_key));
- pthread_setspecific(tsd_key, (void *)t);
+#else
+uptr *GetCurrentThreadLongPtr() {
+ return &__hwasan_tls;
}
+#endif
-void HwasanTSDDtor(void *tsd) {
- HwasanThread *t = (HwasanThread*)tsd;
- if (t->destructor_iterations_ > 1) {
- t->destructor_iterations_--;
- CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
- return;
+#if SANITIZER_ANDROID
+void AndroidTestTlsSlot() {
+ uptr kMagicValue = 0x010203040A0B0C0D;
+ *(uptr *)get_android_tls_ptr() = kMagicValue;
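+  // On incompatible versions of Android, dlerror() uses the same TLS slot
+  // and will clobber the magic value stored above.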
+ dlerror();
+ if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
+ Printf(
+ "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
+ "for dlerror().\n");
+ Die();
}
- // Make sure that signal handler can not see a stale current thread pointer.
- atomic_signal_fence(memory_order_seq_cst);
- HwasanThread::TSDDtor(tsd);
+}
+#else
+void AndroidTestTlsSlot() {}
+#endif
+
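+// The thread-local word doubles as the CompactRingBuffer state: Next()
+// yields an address inside the current thread's stack ring buffer, from
+// which GetThreadByBufferAddress() recovers the owning Thread.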
+Thread *GetCurrentThread() {
+ uptr *ThreadLong = GetCurrentThreadLongPtr();
+#if HWASAN_WITH_INTERCEPTORS
+ if (!*ThreadLong)
+ __hwasan_thread_enter();
+#endif
+ auto *R = (StackAllocationsRingBuffer *)ThreadLong;
+ return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next()));
}
struct AccessInfo {
@@ -340,14 +378,13 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
BufferedStackTrace *stack = stack_buffer.data();
stack->Reset();
SignalContext sig{info, uc};
- GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, uc,
- common_flags()->fast_unwind_on_fatal);
-
- ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store);
+ GetStackTrace(stack, kStackTraceMax, StackTrace::GetNextInstructionPc(sig.pc),
+ sig.bp, uc, common_flags()->fast_unwind_on_fatal);
++hwasan_report_count;
- if (flags()->halt_on_error || !ai.recover)
- Die();
+
+ bool fatal = flags()->halt_on_error || !ai.recover;
+ ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal);
#if defined(__aarch64__)
uc->uc_mcontext.pc += 4;
@@ -360,8 +397,8 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
static void OnStackUnwind(const SignalContext &sig, const void *,
BufferedStackTrace *stack) {
- GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
- common_flags()->fast_unwind_on_fatal);
+ GetStackTrace(stack, kStackTraceMax, StackTrace::GetNextInstructionPc(sig.pc),
+ sig.bp, sig.context, common_flags()->fast_unwind_on_fatal);
}
void HwasanOnDeadlySignal(int signo, void *info, void *context) {
diff --git a/lib/hwasan/hwasan_mapping.h b/lib/hwasan/hwasan_mapping.h
index 650a5aefcb2d..e5e23dc60337 100644
--- a/lib/hwasan/hwasan_mapping.h
+++ b/lib/hwasan/hwasan_mapping.h
@@ -16,68 +16,41 @@
#define HWASAN_MAPPING_H
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "hwasan_interface_internal.h"
-// Typical mapping on Linux/x86_64 with fixed shadow mapping:
-// || [0x080000000000, 0x7fffffffffff] || HighMem ||
-// || [0x008000000000, 0x07ffffffffff] || HighShadow ||
-// || [0x000100000000, 0x007fffffffff] || ShadowGap ||
-// || [0x000010000000, 0x0000ffffffff] || LowMem ||
-// || [0x000001000000, 0x00000fffffff] || LowShadow ||
-// || [0x000000000000, 0x000000ffffff] || ShadowGap ||
-//
-// and with dynamic shadow mapped at [0x770d59f40000, 0x7f0d59f40000]:
+// Typical mapping on Linux/x86_64:
+// with dynamic shadow mapped at [0x770d59f40000, 0x7f0d59f40000]:
// || [0x7f0d59f40000, 0x7fffffffffff] || HighMem ||
// || [0x7efe2f934000, 0x7f0d59f3ffff] || HighShadow ||
// || [0x7e7e2f934000, 0x7efe2f933fff] || ShadowGap ||
// || [0x770d59f40000, 0x7e7e2f933fff] || LowShadow ||
// || [0x000000000000, 0x770d59f3ffff] || LowMem ||
-// Typical mapping on Android/AArch64 (39-bit VMA):
-// || [0x001000000000, 0x007fffffffff] || HighMem ||
-// || [0x000800000000, 0x000fffffffff] || ShadowGap ||
-// || [0x000100000000, 0x0007ffffffff] || HighShadow ||
-// || [0x000010000000, 0x0000ffffffff] || LowMem ||
-// || [0x000001000000, 0x00000fffffff] || LowShadow ||
-// || [0x000000000000, 0x000000ffffff] || ShadowGap ||
-//
-// and with dynamic shadow mapped: [0x007477480000, 0x007c77480000]:
+// Typical mapping on Android/AArch64
+// with dynamic shadow mapped: [0x007477480000, 0x007c77480000]:
// || [0x007c77480000, 0x007fffffffff] || HighMem ||
// || [0x007c3ebc8000, 0x007c7747ffff] || HighShadow ||
// || [0x007bbebc8000, 0x007c3ebc7fff] || ShadowGap ||
// || [0x007477480000, 0x007bbebc7fff] || LowShadow ||
// || [0x000000000000, 0x00747747ffff] || LowMem ||
-static constexpr __sanitizer::u64 kDefaultShadowSentinel = ~(__sanitizer::u64)0;
-
// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th).
-constexpr __sanitizer::uptr kShadowScale = 4;
-constexpr __sanitizer::uptr kShadowAlignment = 1ULL << kShadowScale;
-
-#if SANITIZER_ANDROID
-# define HWASAN_FIXED_MAPPING 0
-#else
-# define HWASAN_FIXED_MAPPING 1
-#endif
-
-#if HWASAN_FIXED_MAPPING
-# define SHADOW_OFFSET (0)
-# define HWASAN_PREMAP_SHADOW 0
-#else
-# define SHADOW_OFFSET (__hwasan_shadow_memory_dynamic_address)
-# define HWASAN_PREMAP_SHADOW 1
-#endif
-
-#define SHADOW_GRANULARITY (1ULL << kShadowScale)
-
-#define MEM_TO_SHADOW(mem) (((uptr)(mem) >> kShadowScale) + SHADOW_OFFSET)
-#define SHADOW_TO_MEM(shadow) (((uptr)(shadow) - SHADOW_OFFSET) << kShadowScale)
-
-#define MEM_TO_SHADOW_SIZE(size) ((uptr)(size) >> kShadowScale)
-
-#define MEM_IS_APP(mem) MemIsApp((uptr)(mem))
+constexpr uptr kShadowScale = 4;
+constexpr uptr kShadowAlignment = 1ULL << kShadowScale;
namespace __hwasan {
+inline uptr MemToShadow(uptr untagged_addr) {
+ return (untagged_addr >> kShadowScale) +
+ __hwasan_shadow_memory_dynamic_address;
+}
+inline uptr ShadowToMem(uptr shadow_addr) {
+ return (shadow_addr - __hwasan_shadow_memory_dynamic_address) << kShadowScale;
+}
+inline uptr MemToShadowSize(uptr size) {
+ return size >> kShadowScale;
+}
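+// For example, with kShadowScale == 4 each 16-byte granule of application
+// memory is described by a single shadow byte, so any two addresses within
+// the same granule share one shadow byte.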
+
bool MemIsApp(uptr p);
} // namespace __hwasan
diff --git a/lib/hwasan/hwasan_memintrinsics.cc b/lib/hwasan/hwasan_memintrinsics.cc
new file mode 100644
index 000000000000..9cb844e45c40
--- /dev/null
+++ b/lib/hwasan/hwasan_memintrinsics.cc
@@ -0,0 +1,45 @@
+//===-- hwasan_memintrinsics.cc ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains HWASAN versions of
+/// memset, memcpy and memmove
+///
+//===----------------------------------------------------------------------===//
+
+#include <string.h>
+#include "hwasan.h"
+#include "hwasan_checks.h"
+#include "hwasan_flags.h"
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+using namespace __hwasan;
+
+void *__hwasan_memset(void *block, int c, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(block), size);
+ return memset(UntagPtr(block), c, size);
+}
+
+void *__hwasan_memcpy(void *to, const void *from, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memcpy(UntagPtr(to), UntagPtr(from), size);
+}
+
+void *__hwasan_memmove(void *to, const void *from, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memmove(UntagPtr(to), UntagPtr(from), size);
+}
diff --git a/lib/hwasan/hwasan_new_delete.cc b/lib/hwasan/hwasan_new_delete.cc
index 63ca74edd481..f2e8faf5da73 100644
--- a/lib/hwasan/hwasan_new_delete.cc
+++ b/lib/hwasan/hwasan_new_delete.cc
@@ -51,7 +51,7 @@ void *operator new[](size_t size, std::nothrow_t const&) {
#define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
- if (ptr) HwasanDeallocate(&stack, ptr)
+ if (ptr) hwasan_free(ptr, &stack)
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
diff --git a/lib/hwasan/hwasan_poisoning.cc b/lib/hwasan/hwasan_poisoning.cc
index b99d8ed0be79..9c8e16b12ad5 100644
--- a/lib/hwasan/hwasan_poisoning.cc
+++ b/lib/hwasan/hwasan_poisoning.cc
@@ -1,4 +1,4 @@
-//===-- hwasan_poisoning.cc ---------------------------------------*- C++ -*-===//
+//===-- hwasan_poisoning.cc -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -22,8 +22,8 @@ namespace __hwasan {
uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
CHECK(IsAligned(p, kShadowAlignment));
CHECK(IsAligned(size, kShadowAlignment));
- uptr shadow_start = MEM_TO_SHADOW(p);
- uptr shadow_size = MEM_TO_SHADOW_SIZE(size);
+ uptr shadow_start = MemToShadow(p);
+ uptr shadow_size = MemToShadowSize(size);
internal_memset((void *)shadow_start, tag, shadow_size);
return AddTagToPointer(p, tag);
}
diff --git a/lib/hwasan/hwasan_poisoning.h b/lib/hwasan/hwasan_poisoning.h
index b44a91f975f5..0dbf9d8ed4ef 100644
--- a/lib/hwasan/hwasan_poisoning.h
+++ b/lib/hwasan/hwasan_poisoning.h
@@ -1,4 +1,4 @@
-//===-- hwasan_poisoning.h ----------------------------------------*- C++ -*-===//
+//===-- hwasan_poisoning.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/hwasan/hwasan_report.cc b/lib/hwasan/hwasan_report.cc
index 16e9016ea35b..ea3e4096daef 100644
--- a/lib/hwasan/hwasan_report.cc
+++ b/lib/hwasan/hwasan_report.cc
@@ -15,18 +15,64 @@
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __sanitizer;
namespace __hwasan {
+class ScopedReport {
+ public:
+ ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
+ BlockingMutexLock lock(&error_message_lock_);
+ error_message_ptr_ = fatal ? &error_message_ : nullptr;
+ }
+
+ ~ScopedReport() {
+ BlockingMutexLock lock(&error_message_lock_);
+ if (fatal) {
+ SetAbortMessage(error_message_.data());
+ Die();
+ }
+ error_message_ptr_ = nullptr;
+ }
+
+ static void MaybeAppendToErrorMessage(const char *msg) {
+ BlockingMutexLock lock(&error_message_lock_);
+ if (!error_message_ptr_)
+ return;
+ uptr len = internal_strlen(msg);
+ uptr old_size = error_message_ptr_->size();
+ error_message_ptr_->resize(old_size + len);
+ // overwrite old trailing '\0', keep new trailing '\0' untouched.
+ internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
+ }
+ private:
+ ScopedErrorReportLock error_report_lock_;
+ InternalMmapVector<char> error_message_;
+ bool fatal;
+
+ static InternalMmapVector<char> *error_message_ptr_;
+ static BlockingMutex error_message_lock_;
+};
+
+InternalMmapVector<char> *ScopedReport::error_message_ptr_;
+BlockingMutex ScopedReport::error_message_lock_;
+
+// If there is an active ScopedReport, append to its error message.
+void AppendToErrorMessageBuffer(const char *buffer) {
+ ScopedReport::MaybeAppendToErrorMessage(buffer);
+}
+
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
@@ -34,100 +80,344 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
+// A RAII object that holds a copy of the current thread stack ring buffer.
+// The actual stack buffer may change while we are iterating over it (for
+// example, Printf may call syslog() which can itself be built with hwasan).
+class SavedStackAllocations {
+ public:
+ SavedStackAllocations(StackAllocationsRingBuffer *rb) {
+ uptr size = rb->size() * sizeof(uptr);
+ void *storage =
+ MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
+ new (&rb_) StackAllocationsRingBuffer(*rb, storage);
+ }
+
+ ~SavedStackAllocations() {
+ StackAllocationsRingBuffer *rb = get();
+ UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ }
+
+ StackAllocationsRingBuffer *get() {
+ return (StackAllocationsRingBuffer *)&rb_;
+ }
+
+ private:
+ uptr rb_;
+};
+
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
+ const char *Access() { return Blue(); }
const char *Allocation() const { return Magenta(); }
const char *Origin() const { return Magenta(); }
const char *Name() const { return Green(); }
+ const char *Location() { return Green(); }
+ const char *Thread() { return Green(); }
};
-struct HeapAddressDescription {
- uptr addr;
- u32 alloc_stack_id;
- u32 free_stack_id;
-
- void Print() const {
- Decorator d;
- if (free_stack_id) {
- Printf("%sfreed here:%s\n", d.Allocation(), d.Default());
- GetStackTraceFromId(free_stack_id).Print();
- Printf("%spreviously allocated here:%s\n", d.Allocation(), d.Default());
- } else {
- Printf("%sallocated here:%s\n", d.Allocation(), d.Default());
+// Returns the index of the rb element that matches tagged_addr (plus one),
+// or zero if nothing was found.
+uptr FindHeapAllocation(HeapAllocationsRingBuffer *rb,
+ uptr tagged_addr,
+ HeapAllocationRecord *har) {
+ if (!rb) return 0;
+ for (uptr i = 0, size = rb->size(); i < size; i++) {
+ auto h = (*rb)[i];
+ if (h.tagged_addr <= tagged_addr &&
+ h.tagged_addr + h.requested_size > tagged_addr) {
+ *har = h;
+ return i + 1;
}
- GetStackTraceFromId(alloc_stack_id).Print();
}
-};
+ return 0;
+}
+
+void PrintAddressDescription(
+ uptr tagged_addr, uptr access_size,
+ StackAllocationsRingBuffer *current_stack_allocations) {
+ Decorator d;
+ int num_descriptions_printed = 0;
+ uptr untagged_addr = UntagAddr(tagged_addr);
+
+ // Print some very basic information about the address, if it's a heap.
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ if (uptr beg = chunk.Beg()) {
+ uptr size = chunk.ActualSize();
+ Printf("%s[%p,%p) is a %s %s heap chunk; "
+ "size: %zd offset: %zd\n%s",
+ d.Location(),
+ beg, beg + size,
+ chunk.FromSmallHeap() ? "small" : "large",
+ chunk.IsAllocated() ? "allocated" : "unallocated",
+ size, untagged_addr - beg,
+ d.Default());
+ }
+
+ // Check if this looks like a heap buffer overflow by scanning
+ // the shadow left and right and looking for the first adjacent
+ // object with a different memory tag. If that tag matches addr_tag,
+ // check the allocator if it has a live chunk there.
+ tag_t addr_tag = GetTagFromPointer(tagged_addr);
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+  if (*tag_ptr != addr_tag) { // usually true.
+ tag_t *left = tag_ptr, *right = tag_ptr;
+ // scan left.
+ for (int i = 0; i < 1000 && *left == *tag_ptr; i++, left--){}
+ // scan right.
+ for (int i = 0; i < 1000 && *right == *tag_ptr; i++, right++){}
+    // Choose the object that has addr_tag and is closer to addr.
+ tag_t *candidate = nullptr;
+ if (*right == addr_tag && *left == addr_tag)
+ candidate = right - tag_ptr < tag_ptr - left ? right : left;
+ else if (*right == addr_tag)
+ candidate = right;
+ else if (*left == addr_tag)
+ candidate = left;
+
+ if (candidate) {
+ uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
+ HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+ if (chunk.IsAllocated()) {
+ Printf("%s", d.Location());
+ Printf(
+ "%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
+ untagged_addr,
+ candidate == left ? untagged_addr - chunk.End()
+ : chunk.Beg() - untagged_addr,
+ candidate == right ? "left" : "right", chunk.UsedSize(),
+ chunk.Beg(), chunk.End());
+ Printf("%s", d.Allocation());
+ Printf("allocated here:\n");
+ Printf("%s", d.Default());
+ GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ num_descriptions_printed++;
+ }
+ }
+ }
+
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ // Scan all threads' ring buffers to find if it's a heap-use-after-free.
+ HeapAllocationRecord har;
+ if (uptr D = FindHeapAllocation(t->heap_allocations(), tagged_addr, &har)) {
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
+ untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
+ har.requested_size, UntagAddr(har.tagged_addr),
+ UntagAddr(har.tagged_addr) + har.requested_size);
+ Printf("%s", d.Allocation());
+ Printf("freed by thread T%zd here:\n", t->unique_id());
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.free_context_id).Print();
+
+ Printf("%s", d.Allocation());
+ Printf("previously allocated here:\n", t);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.alloc_context_id).Print();
+
+ // Print a developer note: the index of this heap object
+ // in the thread's deallocation ring buffer.
+ Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", D,
+ flags()->heap_history_size);
+
+ t->Announce();
+ num_descriptions_printed++;
+ }
+
+ // Very basic check for stack memory.
+ if (t->AddrIsInStack(untagged_addr)) {
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+ t->unique_id());
+ Printf("%s", d.Default());
+ t->Announce();
-bool GetHeapAddressInformation(uptr addr, uptr access_size,
- HeapAddressDescription *description) {
- HwasanChunkView chunk = FindHeapChunkByAddress(addr);
- if (!chunk.IsValid())
- return false;
- description->addr = addr;
- description->alloc_stack_id = chunk.GetAllocStackId();
- description->free_stack_id = chunk.GetFreeStackId();
- return true;
+ // Temporary report section, needs to be improved.
+ Printf("Previously allocated frames:\n");
+ auto *sa = (t == GetCurrentThread() && current_stack_allocations)
+ ? current_stack_allocations
+ : t->stack_allocations();
+ uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
+ InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ for (uptr i = 0; i < frames; i++) {
+ uptr record = (*sa)[i];
+ if (!record)
+ break;
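+      // Each record packs one frame: the low 48 bits hold the PC, and the
+      // top 16 bits hold bits 4..19 of the frame SP (the low 4 bits of SP
+      // are reconstructed as zero).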
+ uptr sp = (record >> 48) << 4;
+ uptr pc_mask = (1ULL << 48) - 1;
+ uptr pc = record & pc_mask;
+ if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
+ frame_desc.append(" sp: 0x%zx pc: %p ", sp, pc);
+ RenderFrame(&frame_desc, "in %f %s:%l\n", 0, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ frame->ClearAll();
+ if (auto Descr = GetStackFrameDescr(pc))
+ frame_desc.append(" %s\n", Descr);
+ }
+ Printf("%s", frame_desc.data());
+ frame_desc.clear();
+ }
+
+ num_descriptions_printed++;
+ }
+ });
+
+  // Print the remaining threads as extra information, one line per thread.
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+
+ if (!num_descriptions_printed)
+ // We exhausted our possibilities. Bail out.
+ Printf("HWAddressSanitizer can not describe address in more detail.\n");
}
-void PrintAddressDescription(uptr addr, uptr access_size) {
- HeapAddressDescription heap_description;
- if (GetHeapAddressInformation(addr, access_size, &heap_description)) {
- heap_description.Print();
- return;
+void ReportStats() {}
+
+static void PrintTagsAroundAddr(tag_t *tag_ptr) {
+ Printf(
+ "Memory tags around the buggy address (one tag corresponds to %zd "
+ "bytes):\n", kShadowAlignment);
+
+  const uptr row_len = 16; // must be a power of two.
+ const uptr num_rows = 17;
+ tag_t *center_row_beg = reinterpret_cast<tag_t *>(
+ RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
+ tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
+ tag_t *end_row = center_row_beg + row_len * (num_rows / 2);
+ InternalScopedString s(GetPageSizeCached() * 8);
+ for (tag_t *row = beg_row; row < end_row; row += row_len) {
+ s.append("%s", row == center_row_beg ? "=>" : " ");
+ for (uptr i = 0; i < row_len; i++) {
+ s.append("%s", row + i == tag_ptr ? "[" : " ");
+ s.append("%02x", row[i]);
+ s.append("%s", row + i == tag_ptr ? "]" : " ");
+ }
+ s.append("%s\n", row == center_row_beg ? "<=" : " ");
}
- // We exhausted our possibilities. Bail out.
- Printf("HWAddressSanitizer can not describe address in more detail.\n");
+ Printf("%s", s.data());
}
-void ReportInvalidAccess(StackTrace *stack, u32 origin) {
- ScopedErrorReportLock l;
+void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
+ ScopedReport R(flags()->halt_on_error);
+ uptr untagged_addr = UntagAddr(tagged_addr);
+ tag_t ptr_tag = GetTagFromPointer(tagged_addr);
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+ tag_t mem_tag = *tag_ptr;
Decorator d;
- Printf("%s", d.Warning());
- Report("WARNING: HWAddressSanitizer: invalid access\n");
+ Printf("%s", d.Error());
+ uptr pc = stack->size ? stack->trace[0] : 0;
+ const char *bug_type = "invalid-free";
+ Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
+ untagged_addr, pc);
+ Printf("%s", d.Access());
+ Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
Printf("%s", d.Default());
+
stack->Print();
- ReportErrorSummary("invalid-access", stack);
-}
-void ReportStats() {}
+ PrintAddressDescription(tagged_addr, 0, nullptr);
-void ReportInvalidAccessInsideAddressRange(const char *what, const void *start,
- uptr size, uptr offset) {
- ScopedErrorReportLock l;
+ PrintTagsAroundAddr(tag_ptr);
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
+ uptr tail_size, const u8 *expected) {
+ ScopedReport R(flags()->halt_on_error);
Decorator d;
- Printf("%s", d.Warning());
- Printf("%sTag mismatch in %s%s%s at offset %zu inside [%p, %zu)%s\n",
- d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
- d.Default());
- PrintAddressDescription((uptr)start + offset, 1);
- // if (__sanitizer::Verbosity())
- // DescribeMemoryRange(start, size);
+ uptr untagged_addr = UntagAddr(tagged_addr);
+ Printf("%s", d.Error());
+ const char *bug_type = "alocation-tail-overwritten";
+ Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
+ bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
+ Printf("\n%s", d.Default());
+ stack->Print();
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ if (chunk.Beg()) {
+ Printf("%s", d.Allocation());
+ Printf("allocated here:\n");
+ Printf("%s", d.Default());
+ GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ }
+
+ InternalScopedString s(GetPageSizeCached() * 8);
+ CHECK_GT(tail_size, 0U);
+ CHECK_LT(tail_size, kShadowAlignment);
+ u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
+ s.append("Tail contains: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
+ s.append(".. ");
+ for (uptr i = 0; i < tail_size; i++)
+ s.append("%02x ", tail[i]);
+ s.append("\n");
+ s.append("Expected: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
+ s.append(".. ");
+ for (uptr i = 0; i < tail_size; i++)
+ s.append("%02x ", expected[i]);
+ s.append("\n");
+ s.append(" ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
+ s.append(" ");
+ for (uptr i = 0; i < tail_size; i++)
+ s.append("%s ", expected[i] != tail[i] ? "^^" : " ");
+
+ s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
+ "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
+ " char *x = new char[20];\n"
+ " x[25] = 42;\n"
+ "By default %s does not detect such bugs at the time of write,\n"
+ "but can detect them at the time of free/delete.\n"
+ "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0;\n"
+ "To enable checking at the time of access, set "
+ "HWASAN_OPTIONS=malloc_align_right to non-zero\n\n",
+ kShadowAlignment, SanitizerToolName);
+ Printf("%s", s.data());
+ GetCurrentThread()->Announce();
+
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+ PrintTagsAroundAddr(tag_ptr);
+
+ ReportErrorSummary(bug_type, stack);
}
-void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
- bool is_store) {
- ScopedErrorReportLock l;
+void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
+ bool is_store, bool fatal) {
+ ScopedReport R(fatal);
+ SavedStackAllocations current_stack_allocations(
+ GetCurrentThread()->stack_allocations());
Decorator d;
- Printf("%s", d.Warning());
- uptr address = GetAddressFromPointer(addr);
- Printf("%s of size %zu at %p\n", is_store ? "WRITE" : "READ", access_size,
- address);
-
- tag_t ptr_tag = GetTagFromPointer(addr);
- tag_t mem_tag = *(tag_t *)MEM_TO_SHADOW(address);
- Printf("pointer tag 0x%x\nmemory tag 0x%x\n", ptr_tag, mem_tag);
+ Printf("%s", d.Error());
+ uptr untagged_addr = UntagAddr(tagged_addr);
+ // TODO: when possible, try to print heap-use-after-free, etc.
+ const char *bug_type = "tag-mismatch";
+ uptr pc = stack->size ? stack->trace[0] : 0;
+ Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
+ untagged_addr, pc);
+
+ Thread *t = GetCurrentThread();
+
+ tag_t ptr_tag = GetTagFromPointer(tagged_addr);
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+ tag_t mem_tag = *tag_ptr;
+ Printf("%s", d.Access());
+ Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
+ is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
+ mem_tag, t->unique_id());
Printf("%s", d.Default());
stack->Print();
- PrintAddressDescription(address, access_size);
+ PrintAddressDescription(tagged_addr, access_size,
+ current_stack_allocations.get());
+ t->Announce();
+
+ PrintTagsAroundAddr(tag_ptr);
- ReportErrorSummary("tag-mismatch", stack);
+ ReportErrorSummary(bug_type, stack);
}
} // namespace __hwasan
diff --git a/lib/hwasan/hwasan_report.h b/lib/hwasan/hwasan_report.h
index bb33f1a87308..10fb20cc5be1 100644
--- a/lib/hwasan/hwasan_report.h
+++ b/lib/hwasan/hwasan_report.h
@@ -21,12 +21,12 @@
namespace __hwasan {
-void ReportInvalidAccess(StackTrace *stack, u32 origin);
void ReportStats();
-void ReportInvalidAccessInsideAddressRange(const char *what, const void *start,
- uptr size, uptr offset);
void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
- bool is_store);
+ bool is_store, bool fatal);
+void ReportInvalidFree(StackTrace *stack, uptr addr);
+void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
+ uptr tail_size, const u8 *expected);
void ReportAtExitStatistics();
diff --git a/lib/hwasan/hwasan_thread.cc b/lib/hwasan/hwasan_thread.cc
index b50c0fc76be4..631c2813eb06 100644
--- a/lib/hwasan/hwasan_thread.cc
+++ b/lib/hwasan/hwasan_thread.cc
@@ -5,8 +5,11 @@
#include "hwasan_poisoning.h"
#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
namespace __hwasan {
static u32 RandomSeed() {
@@ -22,69 +25,70 @@ static u32 RandomSeed() {
return seed;
}
-HwasanThread *HwasanThread::Create(thread_callback_t start_routine,
- void *arg) {
- uptr PageSize = GetPageSizeCached();
- uptr size = RoundUpTo(sizeof(HwasanThread), PageSize);
- HwasanThread *thread = (HwasanThread*)MmapOrDie(size, __func__);
- thread->start_routine_ = start_routine;
- thread->arg_ = arg;
- thread->destructor_iterations_ = GetPthreadDestructorIterations();
- thread->random_state_ = flags()->random_tags ? RandomSeed() : 0;
-
- return thread;
-}
-
-void HwasanThread::SetThreadStackAndTls() {
- uptr tls_size = 0;
- uptr stack_size = 0;
- GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size,
- &tls_begin_, &tls_size);
+void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
+ static u64 unique_id;
+ unique_id_ = unique_id++;
+ random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
+ if (auto sz = flags()->heap_history_size)
+ heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
+
+ HwasanTSDThreadInit(); // Only needed with interceptors.
+ uptr *ThreadLong = GetCurrentThreadLongPtr();
+ // The following implicitly sets (this) as the current thread.
+ stack_allocations_ = new (ThreadLong)
+ StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
+ // Check that it worked.
+ CHECK_EQ(GetCurrentThread(), this);
+
+ // ScopedTaggingDisable needs GetCurrentThread to be set up.
+ ScopedTaggingDisabler disabler;
+
+ uptr tls_size;
+ uptr stack_size;
+ GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
+ &tls_size);
stack_top_ = stack_bottom_ + stack_size;
tls_end_ = tls_begin_ + tls_size;
- int local;
- CHECK(AddrIsInStack((uptr)&local));
-}
-
-void HwasanThread::Init() {
- SetThreadStackAndTls();
- CHECK(MEM_IS_APP(stack_bottom_));
- CHECK(MEM_IS_APP(stack_top_ - 1));
-}
+ if (stack_bottom_) {
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+ CHECK(MemIsApp(stack_bottom_));
+ CHECK(MemIsApp(stack_top_ - 1));
+ }
-void HwasanThread::TSDDtor(void *tsd) {
- HwasanThread *t = (HwasanThread*)tsd;
- t->Destroy();
+ if (flags()->verbose_threads) {
+ if (IsMainThread()) {
+ Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
+ sizeof(Thread), heap_allocations_->SizeInBytes(),
+ stack_allocations_->size() * sizeof(uptr));
+ }
+ Print("Creating : ");
+ }
}
-void HwasanThread::ClearShadowForThreadStackAndTLS() {
- TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
+void Thread::ClearShadowForThreadStackAndTLS() {
+ if (stack_top_ != stack_bottom_)
+ TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_)
TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
}
-void HwasanThread::Destroy() {
- malloc_storage().CommitBack();
+void Thread::Destroy() {
+ if (flags()->verbose_threads)
+ Print("Destroying: ");
+ AllocatorSwallowThreadLocalCache(allocator_cache());
ClearShadowForThreadStackAndTLS();
- uptr size = RoundUpTo(sizeof(HwasanThread), GetPageSizeCached());
- UnmapOrDie(this, size);
+ if (heap_allocations_)
+ heap_allocations_->Delete();
DTLS_Destroy();
}
-thread_return_t HwasanThread::ThreadStart() {
- Init();
-
- if (!start_routine_) {
- // start_routine_ == 0 if we're on the main thread or on one of the
- // OS X libdispatch worker threads. But nobody is supposed to call
- // ThreadStart() for the worker threads.
- return 0;
- }
-
- thread_return_t res = start_routine_(arg_);
-
- return res;
+void Thread::Print(const char *Prefix) {
+ Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix,
+ unique_id_, this, stack_bottom(), stack_top(),
+ stack_top() - stack_bottom(),
+ tls_begin(), tls_end());
}
static u32 xorshift(u32 state) {
@@ -95,7 +99,8 @@ static u32 xorshift(u32 state) {
}
// Generate a (pseudo-)random non-zero tag.
-tag_t HwasanThread::GenerateRandomTag() {
+tag_t Thread::GenerateRandomTag() {
+ if (tagging_disabled_) return 0;
tag_t tag;
do {
if (flags()->random_tags) {
diff --git a/lib/hwasan/hwasan_thread.h b/lib/hwasan/hwasan_thread.h
index 1e482adeac84..4830473f4aab 100644
--- a/lib/hwasan/hwasan_thread.h
+++ b/lib/hwasan/hwasan_thread.h
@@ -1,4 +1,4 @@
-//===-- hwasan_thread.h -------------------------------------------*- C++ -*-===//
+//===-- hwasan_thread.h -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,23 +16,23 @@
#include "hwasan_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
namespace __hwasan {
-class HwasanThread {
+typedef __sanitizer::CompactRingBuffer<uptr> StackAllocationsRingBuffer;
+
+class Thread {
public:
- static HwasanThread *Create(thread_callback_t start_routine, void *arg);
- static void TSDDtor(void *tsd);
+ void Init(uptr stack_buffer_start, uptr stack_buffer_size); // Must be called from the thread itself.
void Destroy();
- void Init(); // Should be called from the thread itself.
- thread_return_t ThreadStart();
-
uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
+ uptr stack_size() { return stack_top() - stack_bottom(); }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
- bool IsMainThread() { return start_routine_ == nullptr; }
+ bool IsMainThread() { return unique_id_ == 0; }
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
@@ -50,19 +50,28 @@ class HwasanThread {
void EnterInterceptorScope() { in_interceptor_scope_++; }
void LeaveInterceptorScope() { in_interceptor_scope_--; }
- HwasanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+ AllocatorCache *allocator_cache() { return &allocator_cache_; }
+ HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
+ StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
tag_t GenerateRandomTag();
- int destructor_iterations_;
+ void DisableTagging() { tagging_disabled_++; }
+ void EnableTagging() { tagging_disabled_--; }
+ bool TaggingIsDisabled() const { return tagging_disabled_; }
+
+ u64 unique_id() const { return unique_id_; }
+ void Announce() {
+ if (announced_) return;
+ announced_ = true;
+ Print("Thread: ");
+ }
private:
- // NOTE: There is no HwasanThread constructor. It is allocated
+ // NOTE: There is no Thread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
- void SetThreadStackAndTls();
void ClearShadowForThreadStackAndTLS();
- thread_callback_t start_routine_;
- void *arg_;
+ void Print(const char *prefix);
uptr stack_top_;
uptr stack_bottom_;
uptr tls_begin_;
@@ -75,11 +84,30 @@ class HwasanThread {
u32 random_state_;
u32 random_buffer_;
- HwasanThreadLocalMallocStorage malloc_storage_;
+ AllocatorCache allocator_cache_;
+ HeapAllocationsRingBuffer *heap_allocations_;
+ StackAllocationsRingBuffer *stack_allocations_;
+
+ static void InsertIntoThreadList(Thread *t);
+ static void RemoveFromThreadList(Thread *t);
+ Thread *next_; // All live threads form a linked list.
+
+ u64 unique_id_; // counting from zero.
+
+ u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
+
+ bool announced_;
+
+ friend struct ThreadListHead;
};
-HwasanThread *GetCurrentThread();
-void SetCurrentThread(HwasanThread *t);
+Thread *GetCurrentThread();
+uptr *GetCurrentThreadLongPtr();
+
+struct ScopedTaggingDisabler {
+ ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
+ ~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
+};
} // namespace __hwasan
diff --git a/lib/hwasan/hwasan_thread_list.cc b/lib/hwasan/hwasan_thread_list.cc
new file mode 100644
index 000000000000..a31eee84ed93
--- /dev/null
+++ b/lib/hwasan/hwasan_thread_list.cc
@@ -0,0 +1,15 @@
+#include "hwasan_thread_list.h"
+
+namespace __hwasan {
+static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)];
+static HwasanThreadList *hwasan_thread_list;
+
+HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
+
+void InitThreadList(uptr storage, uptr size) {
+ CHECK(hwasan_thread_list == nullptr);
+ hwasan_thread_list =
+ new (thread_list_placeholder) HwasanThreadList(storage, size);
+}
+
+} // namespace __hwasan
diff --git a/lib/hwasan/hwasan_thread_list.h b/lib/hwasan/hwasan_thread_list.h
new file mode 100644
index 000000000000..53747b51fd65
--- /dev/null
+++ b/lib/hwasan/hwasan_thread_list.h
@@ -0,0 +1,200 @@
+//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+// HwasanThreadList is a registry for live threads, as well as an allocator for
+// HwasanThread objects and their stack history ring buffers. There are
+// constraints on memory layout of the shadow region and CompactRingBuffer that
+// are part of the ABI contract between compiler-rt and llvm.
+//
+// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
+// * All stack ring buffers are located within a (2**kShadowBaseAlignment)-
+// sized region below and adjacent to the shadow region.
+// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
+// aligned to twice its size. The value of N can be different for each buffer.
+//
+// These constraints guarantee that, given an address A of any element of the
+// ring buffer,
+//   A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
+// is the address of the next element of that ring buffer (with wrap-around):
+// the buffer base is aligned to twice the buffer size, so bit (N + 12) of
+// A + sizeof(uptr) is set only when the increment runs off the end of the
+// buffer, and clearing it wraps back to the base.
+// And, with K = kShadowBaseAlignment,
+// S = (A | ((1 << K) - 1)) + 1
+// (align up to kShadowBaseAlignment) is the start of the shadow region.
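+//
+// Worked example with illustrative numbers (not part of the ABI): for N = 2
+// the ring buffer is 2**2 * 4096 = 0x4000 bytes, aligned to 0x8000; say its
+// base is B = 0x40008000. For the last element, A = B + 0x3ff8, so
+//   A_next = 0x4000c000 & ~(1 << 14) = 0x40008000 = B,
+// i.e. the pointer wraps around to the first element; for every other
+// element bit 14 of A + sizeof(uptr) is still zero and the mask is a no-op.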
+//
+// These calculations are used in compiler instrumentation to update the ring
+// buffer and obtain the base address of shadow using only two inputs: address
+// of the current element of the ring buffer, and N (i.e. size of the ring
+// buffer). Since the value of N is very limited, we pack both inputs into a
+// single thread-local word as
+//   (1 << (N + 56)) | A.
+// See the implementation of class CompactRingBuffer, which is what is stored
+// in said thread-local word.
+//
+// Note the unusual way of aligning up the address of the shadow:
+// (A | ((1 << K) - 1)) + 1
+// It is only correct if A is not already equal to the shadow base address, but
+// it saves 2 instructions on AArch64.
+
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_flags.h"
+#include "hwasan_thread.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __hwasan {
+
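+// Returns the smallest ring buffer size that is a power-of-two number of
+// pages (at least two) and can hold flags()->stack_history_size entries.
+// For example (illustrative flag value, not the default), stack_history_size
+// = 1000 needs 8000 bytes, so the loop settles on 4096 * 2 = 8192 bytes,
+// which holds 1024 entries.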
+static uptr RingBufferSize() {
+ uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
+ // FIXME: increase the limit to 8 once this bug is fixed:
+ // https://bugs.llvm.org/show_bug.cgi?id=39030
+ for (int shift = 1; shift < 7; ++shift) {
+ uptr size = 4096 * (1ULL << shift);
+ if (size >= desired_bytes)
+ return size;
+ }
+ Printf("stack history size too large: %d\n", flags()->stack_history_size);
+ CHECK(0);
+ return 0;
+}
+
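+// Intrusive singly-linked list of Thread objects, chained through
+// Thread::next_. It is not synchronized internally; HwasanThreadList guards
+// every access with list_mutex_.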
+struct ThreadListHead {
+ Thread *list_;
+
+ ThreadListHead() : list_(nullptr) {}
+
+ void Push(Thread *t) {
+ t->next_ = list_;
+ list_ = t;
+ }
+
+ Thread *Pop() {
+ Thread *t = list_;
+ if (t)
+ list_ = t->next_;
+ return t;
+ }
+
+  void Remove(Thread *t) {
+    Thread **cur = &list_;
+    // Stop at the end of the list so that a missing thread trips the CHECK
+    // below instead of dereferencing null.
+    while (*cur && *cur != t) cur = &(*cur)->next_;
+    CHECK(*cur && "thread not found");
+    *cur = (*cur)->next_;
+  }
+
+ template <class CB>
+ void ForEach(CB cb) {
+ Thread *t = list_;
+ while (t) {
+ cb(t);
+ t = t->next_;
+ }
+ }
+};
+
+struct ThreadStats {
+ uptr n_live_threads;
+ uptr total_stack_size;
+};
+
+class HwasanThreadList {
+ public:
+ HwasanThreadList(uptr storage, uptr size)
+ : free_space_(storage),
+ free_space_end_(storage + size),
+ ring_buffer_size_(RingBufferSize()) {}
+
+ Thread *CreateCurrentThread() {
+ Thread *t;
+ {
+ SpinMutexLock l(&list_mutex_);
+ t = free_list_.Pop();
+ if (t)
+ internal_memset((void *)t, 0, sizeof(Thread) + ring_buffer_size_);
+ else
+ t = AllocThread();
+ live_list_.Push(t);
+ }
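+    // The stack ring buffer starts immediately after the Thread object,
+    // hence (uptr)(t + 1); see AllocThread() for the layout.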
+ t->Init((uptr)(t + 1), ring_buffer_size_);
+ AddThreadStats(t);
+ return t;
+ }
+
+ void ReleaseThread(Thread *t) {
+ // FIXME: madvise away the ring buffer?
+ RemoveThreadStats(t);
+ t->Destroy();
+ SpinMutexLock l(&list_mutex_);
+ live_list_.Remove(t);
+ free_list_.Push(t);
+ }
+
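+  // Ring buffers are aligned to twice their size and each Thread object sits
+  // immediately below its buffer, so rounding any in-buffer address down to
+  // that alignment recovers the buffer start, and from it the Thread.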
+ Thread *GetThreadByBufferAddress(uptr p) {
+ uptr align = ring_buffer_size_ * 2;
+ return (Thread *)(RoundDownTo(p, align) - sizeof(Thread));
+ }
+
+ uptr MemoryUsedPerThread() {
+ uptr res = sizeof(Thread) + ring_buffer_size_;
+ if (auto sz = flags()->heap_history_size)
+ res += HeapAllocationsRingBuffer::SizeInBytes(sz);
+ return res;
+ }
+
+ template <class CB>
+ void VisitAllLiveThreads(CB cb) {
+ SpinMutexLock l(&list_mutex_);
+ live_list_.ForEach(cb);
+ }
+
+ void AddThreadStats(Thread *t) {
+ SpinMutexLock l(&stats_mutex_);
+ stats_.n_live_threads++;
+ stats_.total_stack_size += t->stack_size();
+ }
+
+ void RemoveThreadStats(Thread *t) {
+ SpinMutexLock l(&stats_mutex_);
+ stats_.n_live_threads--;
+ stats_.total_stack_size -= t->stack_size();
+ }
+
+ ThreadStats GetThreadStats() {
+ SpinMutexLock l(&stats_mutex_);
+ return stats_;
+ }
+
+ private:
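+  // Bump-allocates one Thread + ring buffer pair from the flat storage
+  // region: the buffer is aligned to twice its size, and the Thread lands in
+  // the padding just below it.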
+ Thread *AllocThread() {
+ uptr align = ring_buffer_size_ * 2;
+ uptr ring_buffer_start = RoundUpTo(free_space_ + sizeof(Thread), align);
+ free_space_ = ring_buffer_start + ring_buffer_size_;
+ CHECK(free_space_ <= free_space_end_ && "out of thread memory");
+ return (Thread *)(ring_buffer_start - sizeof(Thread));
+ }
+
+ uptr free_space_;
+ uptr free_space_end_;
+ uptr ring_buffer_size_;
+
+ ThreadListHead free_list_;
+ ThreadListHead live_list_;
+ SpinMutex list_mutex_;
+
+ ThreadStats stats_;
+ SpinMutex stats_mutex_;
+};
+
+void InitThreadList(uptr storage, uptr size);
+HwasanThreadList &hwasanThreadList();
+
+} // namespace __hwasan
+
+#endif // HWASAN_THREAD_LIST_H