author     Dimitry Andric <dim@FreeBSD.org>    2018-07-28 11:06:48 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2018-07-28 11:06:48 +0000
commit     93c1b73a09a52d4a265f683bf1954b08bb430049 (patch)
tree       5543464d74945196cc890e9d9099e5d0660df7eb /lib/hwasan
parent     0d8e7490d6e8a13a8f0977d9b7771803b9f64ea0 (diff)
Vendor import of compiler-rt trunk r338150 (tag: vendor/compiler-rt/compiler-rt-trunk-r338150)

Notes:
    svn path=/vendor/compiler-rt/dist/; revision=336817
    svn path=/vendor/compiler-rt/compiler-rt-trunk-r338150/; revision=336818; tag=vendor/compiler-rt/compiler-rt-trunk-r338150
Diffstat (limited to 'lib/hwasan')
-rw-r--r--  lib/hwasan/.clang-format                |   1
-rw-r--r--  lib/hwasan/CMakeLists.txt               |  52
-rw-r--r--  lib/hwasan/hwasan.cc                    |  89
-rw-r--r--  lib/hwasan/hwasan.h                     |  30
-rw-r--r--  lib/hwasan/hwasan_allocator.cc          |  84
-rw-r--r--  lib/hwasan/hwasan_dynamic_shadow.cc     | 132
-rw-r--r--  lib/hwasan/hwasan_dynamic_shadow.h      |  27
-rw-r--r--  lib/hwasan/hwasan_flags.inc             |   4
-rw-r--r--  lib/hwasan/hwasan_interceptors.cc       |  55
-rw-r--r--  lib/hwasan/hwasan_interface_internal.h  |  18
-rw-r--r--  lib/hwasan/hwasan_linux.cc              | 294
-rw-r--r--  lib/hwasan/hwasan_mapping.h             |  85
-rw-r--r--  lib/hwasan/hwasan_new_delete.cc         |   5
-rw-r--r--  lib/hwasan/hwasan_poisoning.cc          |   3
-rw-r--r--  lib/hwasan/hwasan_report.cc             |  10
-rw-r--r--  lib/hwasan/hwasan_report.h              |  36
-rw-r--r--  lib/hwasan/hwasan_thread.cc             |  39
-rw-r--r--  lib/hwasan/hwasan_thread.h              |   5
18 files changed, 752 insertions, 217 deletions
diff --git a/lib/hwasan/.clang-format b/lib/hwasan/.clang-format
index f6cb8ad931f5..560308c91dee 100644
--- a/lib/hwasan/.clang-format
+++ b/lib/hwasan/.clang-format
@@ -1 +1,2 @@
BasedOnStyle: Google
+AllowShortIfStatementsOnASingleLine: false
diff --git a/lib/hwasan/CMakeLists.txt b/lib/hwasan/CMakeLists.txt
index 3f3a6155590c..42bf4366f192 100644
--- a/lib/hwasan/CMakeLists.txt
+++ b/lib/hwasan/CMakeLists.txt
@@ -4,39 +4,38 @@ include_directories(..)
set(HWASAN_RTL_SOURCES
hwasan.cc
hwasan_allocator.cc
+ hwasan_dynamic_shadow.cc
hwasan_interceptors.cc
hwasan_linux.cc
+ hwasan_poisoning.cc
hwasan_report.cc
hwasan_thread.cc
- hwasan_poisoning.cc
)
set(HWASAN_RTL_CXX_SOURCES
hwasan_new_delete.cc)
+set(HWASAN_RTL_HEADERS
+ hwasan.h
+ hwasan_allocator.h
+ hwasan_dynamic_shadow.h
+ hwasan_flags.h
+ hwasan_flags.inc
+ hwasan_interface_internal.h
+ hwasan_mapping.h
+ hwasan_poisoning.h
+ hwasan_report.h
+ hwasan_thread.h)
+
set(HWASAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF HWASAN_RTL_CFLAGS)
-append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE HWASAN_RTL_CFLAGS)
+append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC HWASAN_RTL_CFLAGS)
# Prevent clang from generating libc calls.
append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding HWASAN_RTL_CFLAGS)
set(HWASAN_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS})
-if(ANDROID)
-# On Android, -z global does not do what it is documented to do.
-# On Android, -z global moves the library ahead in the lookup order,
-# placing it right after the LD_PRELOADs. This is used to compensate for the fact
-# that Android linker does not look at the dependencies of the main executable
-# that aren't dependencies of the current DSO when resolving symbols from said DSO.
-# As a net result, this allows running ASan executables without LD_PRELOAD-ing the
-# ASan runtime library.
-# The above is applicable to L MR1 or newer.
- if (COMPILER_RT_HAS_Z_GLOBAL)
- list(APPEND HWASAN_DYNAMIC_LINK_FLAGS -Wl,-z,global)
- endif()
-endif()
-
set(HWASAN_DYNAMIC_CFLAGS ${HWASAN_RTL_CFLAGS})
append_list_if(COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC
-ftls-model=initial-exec HWASAN_DYNAMIC_CFLAGS)
@@ -55,13 +54,18 @@ add_compiler_rt_component(hwasan)
add_compiler_rt_object_libraries(RTHwasan
ARCHS ${HWASAN_SUPPORTED_ARCH}
- SOURCES ${HWASAN_RTL_SOURCES} CFLAGS ${HWASAN_RTL_CFLAGS})
+ SOURCES ${HWASAN_RTL_SOURCES}
+ ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS}
+ CFLAGS ${HWASAN_RTL_CFLAGS})
add_compiler_rt_object_libraries(RTHwasan_cxx
ARCHS ${HWASAN_SUPPORTED_ARCH}
- SOURCES ${HWASAN_RTL_CXX_SOURCES} CFLAGS ${HWASAN_RTL_CFLAGS})
+ SOURCES ${HWASAN_RTL_CXX_SOURCES}
+ ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS}
+ CFLAGS ${HWASAN_RTL_CFLAGS})
add_compiler_rt_object_libraries(RTHwasan_dynamic
ARCHS ${HWASAN_SUPPORTED_ARCH}
- SOURCES ${HWASAN_RTL_SOURCES} ${TSAN_RTL_CXX_SOURCES}
+ SOURCES ${HWASAN_RTL_SOURCES} ${HWASAN_RTL_CXX_SOURCES}
+ ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS}
CFLAGS ${HWASAN_DYNAMIC_CFLAGS})
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy.cc "")
@@ -78,7 +82,9 @@ foreach(arch ${HWASAN_SUPPORTED_ARCH})
RTInterception
RTSanitizerCommon
RTSanitizerCommonLibc
- RTUbsan
+ RTSanitizerCommonCoverage
+ RTSanitizerCommonSymbolizer
+ RTUbsan
CFLAGS ${HWASAN_RTL_CFLAGS}
PARENT_TARGET hwasan)
add_compiler_rt_runtime(clang_rt.hwasan_cxx
@@ -112,7 +118,9 @@ foreach(arch ${HWASAN_SUPPORTED_ARCH})
RTInterception
RTSanitizerCommon
RTSanitizerCommonLibc
- RTUbsan
+ RTSanitizerCommonCoverage
+ RTSanitizerCommonSymbolizer
+ RTUbsan
# The only purpose of RTHWAsan_dynamic_version_script_dummy is to
# carry a dependency of the shared runtime on the version script.
# Replacing it with a straightforward
@@ -126,7 +134,7 @@ foreach(arch ${HWASAN_SUPPORTED_ARCH})
DEFS ${ASAN_DYNAMIC_DEFINITIONS}
PARENT_TARGET hwasan)
- if(UNIX)
+ if(SANITIZER_USE_SYMBOLS)
add_sanitizer_rt_symbols(clang_rt.hwasan
ARCHS ${arch}
EXTRA hwasan.syms.extra)
diff --git a/lib/hwasan/hwasan.cc b/lib/hwasan/hwasan.cc
index 8b1e5a7846af..7dab8249e3f7 100644
--- a/lib/hwasan/hwasan.cc
+++ b/lib/hwasan/hwasan.cc
@@ -1,4 +1,4 @@
-//===-- hwasan.cc -----------------------------------------------------------===//
+//===-- hwasan.cc ---------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,8 +13,10 @@
//===----------------------------------------------------------------------===//
#include "hwasan.h"
-#include "hwasan_thread.h"
+#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -84,7 +86,7 @@ static void InitializeFlags() {
cf.check_printf = false;
cf.intercept_tls_get_addr = true;
cf.exitcode = 99;
- cf.handle_sigill = kHandleSignalExclusive;
+ cf.handle_sigtrap = kHandleSignalExclusive;
OverrideCommonFlags(cf);
}
@@ -143,12 +145,22 @@ void PrintWarning(uptr pc, uptr bp) {
ReportInvalidAccess(&stack, 0);
}
+static void HWAsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
+ PRINT_CURRENT_STACK_CHECK();
+ Die();
+}
+
} // namespace __hwasan
// Interface.
using namespace __hwasan;
+uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol.
+
void __hwasan_init() {
CHECK(!hwasan_init_is_running);
if (hwasan_inited) return;
@@ -160,23 +172,28 @@ void __hwasan_init() {
CacheBinaryName();
InitializeFlags();
- __sanitizer_set_report_path(common_flags()->log_path);
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckFailedCallback(HWAsanCheckFailed);
- InitializeInterceptors();
- InstallDeadlySignalHandlers(HwasanOnDeadlySignal);
- InstallAtExitHandler(); // Needs __cxa_atexit interceptor.
+ __sanitizer_set_report_path(common_flags()->log_path);
DisableCoreDumperIfNecessary();
if (!InitShadow()) {
- Printf("FATAL: HWAddressSanitizer can not mmap the shadow memory.\n");
- Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
- Printf("FATAL: Disabling ASLR is known to cause this error.\n");
- Printf("FATAL: If running under GDB, try "
- "'set disable-randomization off'.\n");
+ Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
+ if (HWASAN_FIXED_MAPPING) {
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
+ Printf("FATAL: Disabling ASLR is known to cause this error.\n");
+ Printf("FATAL: If running under GDB, try "
+ "'set disable-randomization off'.\n");
+ }
DumpProcessMap();
Die();
}
+ InitializeInterceptors();
+ InstallDeadlySignalHandlers(HwasanOnDeadlySignal);
+ InstallAtExitHandler(); // Needs __cxa_atexit interceptor.
+
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
@@ -240,11 +257,23 @@ void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
template<unsigned X>
__attribute__((always_inline))
-static void SigIll() {
+static void SigTrap(uptr p) {
#if defined(__aarch64__)
- asm("hlt %0\n\t" ::"n"(X));
-#elif defined(__x86_64__) || defined(__i386__)
- asm("ud2\n\t");
+ (void)p;
+ // 0x900 is added so as not to interfere with the kernel's use of lower
+ // values of brk immediate.
+ // FIXME: Add a constraint to put the pointer into x0, the same as x86 branch.
+ asm("brk %0\n\t" ::"n"(0x900 + X));
+#elif defined(__x86_64__)
+ // INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
+ // total. The pointer is passed via rdi.
+ // 0x40 is added as a safeguard, to help distinguish our trap from others and
+ // to avoid 0 offsets in the command (otherwise it'll be reduced to a
+ // different nop command, the three-byte one).
+ asm volatile(
+ "int3\n"
+ "nopl %c0(%%rax)\n"
+ :: "n"(0x40 + X), "D"(p));
#else
// FIXME: not always sigill.
__builtin_trap();
@@ -261,8 +290,8 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MEM_TO_SHADOW(ptr_raw);
if (UNLIKELY(ptr_tag != mem_tag)) {
- SigIll<0x100 + 0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + LogSize>();
+ SigTrap<0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + LogSize>(p);
if (EA == ErrorAction::Abort) __builtin_unreachable();
}
}
@@ -277,13 +306,13 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
tag_t *shadow_last = (tag_t *)MEM_TO_SHADOW(ptr_raw + sz - 1);
for (tag_t *t = shadow_first; t <= shadow_last; ++t)
if (UNLIKELY(ptr_tag != *t)) {
- SigIll<0x100 + 0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>();
+ SigTrap<0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + 0xf>(p);
if (EA == ErrorAction::Abort) __builtin_unreachable();
}
}
-void __hwasan_load(uptr p, uptr sz) {
+void __hwasan_loadN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
}
void __hwasan_load1(uptr p) {
@@ -302,7 +331,7 @@ void __hwasan_load16(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
}
-void __hwasan_load_noabort(uptr p, uptr sz) {
+void __hwasan_loadN_noabort(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
}
void __hwasan_load1_noabort(uptr p) {
@@ -321,7 +350,7 @@ void __hwasan_load16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
}
-void __hwasan_store(uptr p, uptr sz) {
+void __hwasan_storeN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
}
void __hwasan_store1(uptr p) {
@@ -340,7 +369,7 @@ void __hwasan_store16(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
}
-void __hwasan_store_noabort(uptr p, uptr sz) {
+void __hwasan_storeN_noabort(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
}
void __hwasan_store1_noabort(uptr p) {
@@ -359,6 +388,18 @@ void __hwasan_store16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
}
+void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
+ TagMemoryAligned(p, sz, tag);
+}
+
+static const u8 kFallbackTag = 0xBB;
+
+u8 __hwasan_generate_tag() {
+ HwasanThread *t = GetCurrentThread();
+ if (!t) return kFallbackTag;
+ return t->GenerateRandomTag();
+}
+
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
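
The SigTrap change above replaces the old SIGILL scheme: the access parameters are folded into the trap instruction itself and recovered later by the signal handler (see GetAccessInfo in hwasan_linux.cc further down). As a reading aid only, and not part of this patch, the encode/decode round trip looks roughly like this:

    // Hypothetical standalone sketch of the immediate layout used by SigTrap<X>.
    #include <cstdio>
    static constexpr unsigned EncodeAccess(bool is_store, bool recover,
                                           unsigned size_log) {
      return 0x20 * recover + 0x10 * is_store + size_log;  // X in SigTrap<X>
    }
    int main() {
      unsigned code = EncodeAccess(/*is_store=*/true, /*recover=*/false, 3);
      bool is_store = code & 0x10;           // decoded by the signal handler
      bool recover = code & 0x20;
      unsigned size = 1u << (code & 0xf);    // size_log 0xf: size is in a register
      std::printf("store=%d recover=%d size=%u\n", is_store, recover, size);
      // AArch64 emits "brk #(0x900 + code)"; x86_64 emits "int3" followed by
      // "nopl (0x40 + code)(%rax)" and passes the faulting pointer in rdi, while
      // the AArch64 handler reads it from x0.
    }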
diff --git a/lib/hwasan/hwasan.h b/lib/hwasan/hwasan.h
index bcf5282dce7c..47d1d057a0dd 100644
--- a/lib/hwasan/hwasan.h
+++ b/lib/hwasan/hwasan.h
@@ -1,4 +1,4 @@
-//===-- hwasan.h --------------------------------------------------*- C++ -*-===//
+//===-- hwasan.h ------------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -32,16 +32,6 @@
typedef u8 tag_t;
-// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th).
-const uptr kShadowScale = 4;
-const uptr kShadowAlignment = 1UL << kShadowScale;
-
-#define MEM_TO_SHADOW_OFFSET(mem) ((uptr)(mem) >> kShadowScale)
-#define MEM_TO_SHADOW(mem) ((uptr)(mem) >> kShadowScale)
-#define SHADOW_TO_MEM(shadow) ((uptr)(shadow) << kShadowScale)
-
-#define MEM_IS_APP(mem) true
-
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
// translation and can be used to store a tag.
const unsigned kAddressTagShift = 56;
@@ -107,15 +97,6 @@ void PrintWarning(uptr pc, uptr bp);
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
void *context, bool request_fast_unwind);
-void ReportInvalidAccess(StackTrace *stack, u32 origin);
-void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
- bool is_store);
-void ReportStats();
-void ReportAtExitStatistics();
-void DescribeMemoryRange(const void *x, uptr size);
-void ReportInvalidAccessInsideAddressRange(const char *what, const void *start, uptr size,
- uptr offset);
-
// Returns a "chained" origin id, pointing to the given stack trace followed by
// the previous origin id.
u32 ChainOrigin(u32 id, StackTrace *stack);
@@ -135,6 +116,15 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
GetStackTrace(&stack, kStackTraceMax, pc, bp, nullptr, \
common_flags()->fast_unwind_on_fatal)
+#define GET_FATAL_STACK_TRACE_HERE \
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+
+#define PRINT_CURRENT_STACK_CHECK() \
+ { \
+ GET_FATAL_STACK_TRACE_HERE; \
+ stack.Print(); \
+ }
+
class ScopedThreadLocalStateBackup {
public:
ScopedThreadLocalStateBackup() { Backup(); }
diff --git a/lib/hwasan/hwasan_allocator.cc b/lib/hwasan/hwasan_allocator.cc
index fbcaf78b88f0..c2b9b0b69589 100644
--- a/lib/hwasan/hwasan_allocator.cc
+++ b/lib/hwasan/hwasan_allocator.cc
@@ -1,4 +1,4 @@
-//===-- hwasan_allocator.cc --------------------------- ---------------------===//
+//===-- hwasan_allocator.cc ------------------------- ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,11 +15,13 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
+#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_poisoning.h"
@@ -70,8 +72,8 @@ struct HwasanMapUnmapCallback {
}
};
-#if !defined(__aarch64__)
-#error unsupported platform
+#if !defined(__aarch64__) && !defined(__x86_64__)
+#error Unsupported platform
#endif
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
@@ -100,6 +102,9 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;
+static const tag_t kFallbackAllocTag = 0xBB;
+static const tag_t kFallbackFreeTag = 0xBC;
+
void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
@@ -123,9 +128,12 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
size = RoundUpTo(size, kShadowAlignment);
if (size > kMaxAllowedMallocSize) {
- Report("WARNING: HWAddressSanitizer failed to allocate %p bytes\n",
- (void *)size);
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
+ size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack);
}
HwasanThread *t = GetCurrentThread();
void *allocated;
@@ -137,6 +145,12 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, size, alignment);
}
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, stack);
+ }
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->state = CHUNK_ALLOCATED;
@@ -145,10 +159,11 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (zeroise)
internal_memset(allocated, 0, size);
- void *user_ptr = (flags()->tag_in_malloc &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
- ? (void *)TagMemoryAligned((uptr)allocated, size, 0xBB)
- : allocated;
+ void *user_ptr = allocated;
+ if (flags()->tag_in_malloc &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
+ user_ptr = (void *)TagMemoryAligned(
+ (uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag);
HWASAN_MALLOC_HOOK(user_ptr, size);
return user_ptr;
@@ -166,10 +181,11 @@ void HwasanDeallocate(StackTrace *stack, void *user_ptr) {
meta->free_context_id = StackDepotPut(*stack);
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
+ HwasanThread *t = GetCurrentThread();
if (flags()->tag_in_free &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
- TagMemoryAligned((uptr)p, size, 0xBC);
- HwasanThread *t = GetCurrentThread();
+ TagMemoryAligned((uptr)p, size,
+ t ? t->GenerateRandomTag() : kFallbackFreeTag);
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocator.Deallocate(cache, p);
@@ -195,8 +211,12 @@ void *HwasanReallocate(StackTrace *stack, void *user_old_p, uptr new_size,
meta->requested_size = new_size;
if (!atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
return user_old_p;
- if (flags()->retag_in_realloc)
- return (void *)TagMemoryAligned((uptr)old_p, new_size, 0xCC);
+ if (flags()->retag_in_realloc) {
+ HwasanThread *t = GetCurrentThread();
+ return (void *)TagMemoryAligned(
+ (uptr)old_p, new_size,
+ t ? t->GenerateRandomTag() : kFallbackAllocTag);
+ }
if (new_size > old_size) {
tag_t tag = GetTagFromPointer((uptr)user_old_p);
TagMemoryAligned((uptr)old_p + old_size, new_size - old_size, tag);
@@ -212,6 +232,15 @@ void *HwasanReallocate(StackTrace *stack, void *user_old_p, uptr new_size,
return new_p;
}
+void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, stack);
+ }
+ return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
+}
+
HwasanChunkView FindHeapChunkByAddress(uptr address) {
void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
if (!block)
@@ -235,9 +264,7 @@ void *hwasan_malloc(uptr size, StackTrace *stack) {
}
void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
- if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
- return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
- return SetErrnoOnNull(HwasanAllocate(stack, nmemb * size, sizeof(u64), true));
+ return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}
void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
@@ -251,14 +278,17 @@ void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
}
void *hwasan_valloc(uptr size, StackTrace *stack) {
- return SetErrnoOnNull(HwasanAllocate(stack, size, GetPageSizeCached(), false));
+ return SetErrnoOnNull(
+ HwasanAllocate(stack, size, GetPageSizeCached(), false));
}
void *hwasan_pvalloc(uptr size, StackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, stack);
}
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -268,7 +298,9 @@ void *hwasan_pvalloc(uptr size, StackTrace *stack) {
void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, stack);
}
return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}
@@ -276,7 +308,9 @@ void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, stack);
}
return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}
@@ -284,18 +318,20 @@ void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
- Allocator::FailureHandler::OnBadRequest();
- return errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, stack);
}
void *ptr = HwasanAllocate(stack, size, alignment, false);
if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by HwasanAllocate.
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
}
-} // namespace __hwasan
+} // namespace __hwasan
using namespace __hwasan;
diff --git a/lib/hwasan/hwasan_dynamic_shadow.cc b/lib/hwasan/hwasan_dynamic_shadow.cc
new file mode 100644
index 000000000000..17338003aa65
--- /dev/null
+++ b/lib/hwasan/hwasan_dynamic_shadow.cc
@@ -0,0 +1,132 @@
+//===-- hwasan_dynamic_shadow.cc --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
+/// region and handles ifunc resolver case, when necessary.
+///
+//===----------------------------------------------------------------------===//
+
+#include "hwasan_dynamic_shadow.h"
+#include "hwasan_mapping.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
+// The code in this file needs to run in an unrelocated binary. It should not
+// access any external symbol, including its own non-hidden globals.
+
+namespace __hwasan {
+
+static void UnmapFromTo(uptr from, uptr to) {
+ if (to == from)
+ return;
+ CHECK(to >= from);
+ uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, to - from, to - from, from);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+// Returns an address aligned to 8 pages, such that one page on the left and
+// shadow_size_bytes bytes on the right of it are mapped r/o.
+static uptr MapDynamicShadow(uptr shadow_size_bytes) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment = granularity * SHADOW_GRANULARITY;
+ const uptr left_padding = granularity;
+ const uptr shadow_size =
+ RoundUpTo(shadow_size_bytes, granularity);
+ const uptr map_size = shadow_size + left_padding + alignment;
+
+ const uptr map_start = (uptr)MmapNoAccess(map_size);
+ CHECK_NE(map_start, ~(uptr)0);
+
+ const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+
+ UnmapFromTo(map_start, shadow_start - left_padding);
+ UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
+
+ return shadow_start;
+}
+
+} // namespace __hwasan
+
+#if HWASAN_PREMAP_SHADOW
+
+extern "C" {
+
+INTERFACE_ATTRIBUTE void __hwasan_shadow();
+decltype(__hwasan_shadow)* __hwasan_premap_shadow();
+
+} // extern "C"
+
+namespace __hwasan {
+
+// Conservative upper limit.
+static uptr PremapShadowSize() {
+ return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale,
+ GetMmapGranularity());
+}
+
+static uptr PremapShadow() {
+ return MapDynamicShadow(PremapShadowSize());
+}
+
+static bool IsPremapShadowAvailable() {
+ const uptr shadow = reinterpret_cast<uptr>(&__hwasan_shadow);
+ const uptr resolver = reinterpret_cast<uptr>(&__hwasan_premap_shadow);
+ // shadow == resolver is how Android KitKat and older handles ifunc.
+ // shadow == 0 just in case.
+ return shadow != 0 && shadow != resolver;
+}
+
+static uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr shadow_start = reinterpret_cast<uptr>(&__hwasan_shadow);
+ const uptr premap_shadow_size = PremapShadowSize();
+ const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
+
+ // We may have mapped too much. Release extra memory.
+ UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
+ return shadow_start;
+}
+
+} // namespace __hwasan
+
+extern "C" {
+
+decltype(__hwasan_shadow)* __hwasan_premap_shadow() {
+ // The resolver might be called multiple times. Map the shadow just once.
+ static __sanitizer::uptr shadow = 0;
+ if (!shadow)
+ shadow = __hwasan::PremapShadow();
+ return reinterpret_cast<decltype(__hwasan_shadow)*>(shadow);
+}
+
+// __hwasan_shadow is a "function" that has the same address as the first byte
+// of the shadow mapping.
+INTERFACE_ATTRIBUTE __attribute__((ifunc("__hwasan_premap_shadow")))
+void __hwasan_shadow();
+
+} // extern "C"
+
+#endif // HWASAN_PREMAP_SHADOW
+
+namespace __hwasan {
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+#if HWASAN_PREMAP_SHADOW
+ if (IsPremapShadowAvailable())
+ return FindPremappedShadowStart(shadow_size_bytes);
+#endif
+ return MapDynamicShadow(shadow_size_bytes);
+}
+
+} // namespace __hwasan
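
MapDynamicShadow above over-reserves and then trims: it maps shadow_size plus one guard page plus one full alignment unit with no access, rounds the start up to the alignment, and unmaps the slack on both sides, keeping the one-page left padding in place. A rough numeric illustration (the concrete addresses are made up), assuming a 4 KiB mmap granularity so that alignment = granularity * SHADOW_GRANULARITY = 0x10000:

    map_size     = shadow_size + 0x1000 + 0x10000
    map_start    = 0x7f000000a000                            // returned by MmapNoAccess
    shadow_start = RoundUpTo(map_start + 0x1000, 0x10000)    // = 0x7f0000010000
    UnmapFromTo(map_start, shadow_start - 0x1000)             // trim left, keep guard page
    UnmapFromTo(shadow_start + shadow_size, map_start + map_size)  // trim right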
diff --git a/lib/hwasan/hwasan_dynamic_shadow.h b/lib/hwasan/hwasan_dynamic_shadow.h
new file mode 100644
index 000000000000..b5e9e1dd6a06
--- /dev/null
+++ b/lib/hwasan/hwasan_dynamic_shadow.h
@@ -0,0 +1,27 @@
+//===-- hwasan_dynamic_shadow.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
+/// region.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_PREMAP_SHADOW_H
+#define HWASAN_PREMAP_SHADOW_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __hwasan {
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes);
+
+} // namespace __hwasan
+
+#endif // HWASAN_PREMAP_SHADOW_H
diff --git a/lib/hwasan/hwasan_flags.inc b/lib/hwasan/hwasan_flags.inc
index a2cb701ae93d..c45781168d6c 100644
--- a/lib/hwasan/hwasan_flags.inc
+++ b/lib/hwasan/hwasan_flags.inc
@@ -27,3 +27,7 @@ HWASAN_FLAG(bool, atexit, false, "")
// Test only flag to disable malloc/realloc/free memory tagging on startup.
// Tagging can be reenabled with __hwasan_enable_allocator_tagging().
HWASAN_FLAG(bool, disable_allocator_tagging, false, "")
+
+// If false, use simple increment of a thread local counter to generate new
+// tags.
+HWASAN_FLAG(bool, random_tags, true, "")
diff --git a/lib/hwasan/hwasan_interceptors.cc b/lib/hwasan/hwasan_interceptors.cc
index fb39e9fdacf9..66aab95db56f 100644
--- a/lib/hwasan/hwasan_interceptors.cc
+++ b/lib/hwasan/hwasan_interceptors.cc
@@ -17,8 +17,10 @@
#include "interception/interception.h"
#include "hwasan.h"
+#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_poisoning.h"
+#include "hwasan_report.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -258,34 +260,17 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
INTERCEPTOR(void *, malloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!hwasan_init_is_running))
+ ENSURE_HWASAN_INITED();
if (UNLIKELY(!hwasan_inited))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
return hwasan_malloc(size, &stack);
}
-
-INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
- int fd, OFF_T offset) {
- if (hwasan_init_is_running)
- return REAL(mmap)(addr, length, prot, flags, fd, offset);
- ENSURE_HWASAN_INITED();
- if (addr && !MEM_IS_APP(addr)) {
- if (flags & map_fixed) {
- errno = errno_EINVAL;
- return (void *)-1;
- } else {
- addr = nullptr;
- }
- }
- void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
- return res;
-}
-
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
- int fd, OFF64_T offset) {
- ENSURE_HWASAN_INITED();
+template <class Mmap>
+static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T sz, int prot,
+ int flags, int fd, OFF64_T off) {
if (addr && !MEM_IS_APP(addr)) {
if (flags & map_fixed) {
errno = errno_EINVAL;
@@ -294,13 +279,8 @@ INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
addr = nullptr;
}
}
- void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);
- return res;
+ return real_mmap(addr, sz, prot, flags, fd, off);
}
-#define HWASAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64)
-#else
-#define HWASAN_MAYBE_INTERCEPT_MMAP64
-#endif
extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
@@ -427,6 +407,22 @@ int OnExit() {
*begin = *end = 0; \
}
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin && \
+ MEM_IS_APP(GetAddressFromPointer(dst))) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, fd, \
+ offset) \
+ do { \
+ return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd, \
+ offset); \
+ } while (false)
+
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
@@ -448,6 +444,7 @@ int OnExit() {
(void)(s); \
} while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
@@ -459,8 +456,6 @@ void InitializeInterceptors() {
InitializeCommonInterceptors();
InitializeSignalInterceptors();
- INTERCEPT_FUNCTION(mmap);
- HWASAN_MAYBE_INTERCEPT_MMAP64;
INTERCEPT_FUNCTION(posix_memalign);
HWASAN_MAYBE_INTERCEPT_MEMALIGN;
INTERCEPT_FUNCTION(__libc_memalign);
diff --git a/lib/hwasan/hwasan_interface_internal.h b/lib/hwasan/hwasan_interface_internal.h
index 7e95271ac0af..b4e5c80904df 100644
--- a/lib/hwasan/hwasan_interface_internal.h
+++ b/lib/hwasan/hwasan_interface_internal.h
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
extern "C" {
+
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_init();
@@ -32,7 +33,10 @@ using __sanitizer::u16;
using __sanitizer::u8;
SANITIZER_INTERFACE_ATTRIBUTE
-void __hwasan_load(uptr, uptr);
+extern uptr __hwasan_shadow_memory_dynamic_address;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load1(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -45,7 +49,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load16(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
-void __hwasan_load_noabort(uptr, uptr);
+void __hwasan_loadN_noabort(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load1_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -58,7 +62,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
-void __hwasan_store(uptr, uptr);
+void __hwasan_storeN(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store1(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -71,7 +75,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store16(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
-void __hwasan_store_noabort(uptr, uptr);
+void __hwasan_storeN_noabort(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store1_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -83,6 +87,12 @@ void __hwasan_store8_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store16_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u8 __hwasan_generate_tag();
+
// Returns the offset of the first tag mismatch or -1 if the whole range is
// good.
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/lib/hwasan/hwasan_linux.cc b/lib/hwasan/hwasan_linux.cc
index 48dea8eb6ff4..5ab98dca594d 100644
--- a/lib/hwasan/hwasan_linux.cc
+++ b/lib/hwasan/hwasan_linux.cc
@@ -1,4 +1,4 @@
-//===-- hwasan_linux.cc -----------------------------------------------------===//
+//===-- hwasan_linux.cc -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -6,41 +6,45 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
-//
-// This file is a part of HWAddressSanitizer.
-//
-// Linux-, NetBSD- and FreeBSD-specific code.
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
+/// FreeBSD-specific code.
+///
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "hwasan.h"
+#include "hwasan_dynamic_shadow.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
+#include "hwasan_report.h"
#include "hwasan_thread.h"
#include <elf.h>
#include <link.h>
#include <pthread.h>
+#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
-#include <signal.h>
+#include <sys/resource.h>
+#include <sys/time.h>
#include <unistd.h>
#include <unwind.h>
-#include <sys/time.h>
-#include <sys/resource.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"
namespace __hwasan {
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
+static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ((beg % GetMmapGranularity()), 0);
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- void *res = MmapFixedNoReserve(beg, size, name);
- if (res != (void *)beg) {
+ if (!MmapFixedNoReserve(beg, size, name)) {
Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n",
@@ -52,8 +56,11 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
}
static void ProtectGap(uptr addr, uptr size) {
+ if (!size)
+ return;
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
+ if (addr == (uptr)res)
+ return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
@@ -63,63 +70,160 @@ static void ProtectGap(uptr addr, uptr size) {
addr += step;
size -= step;
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
+ if (addr == (uptr)res)
+ return;
}
}
Report(
- "ERROR: Failed to protect the shadow gap. "
- "ASan cannot proceed correctly. ABORTING.\n");
+ "ERROR: Failed to protect shadow gap [%p, %p]. "
+ "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr,
+ (void *)(addr + size));
DumpProcessMap();
Die();
}
-bool InitShadow() {
- const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
+static uptr kLowMemStart;
+static uptr kLowMemEnd;
+static uptr kLowShadowEnd;
+static uptr kLowShadowStart;
+static uptr kHighShadowStart;
+static uptr kHighShadowEnd;
+static uptr kHighMemStart;
+static uptr kHighMemEnd;
+
+static void PrintRange(uptr start, uptr end, const char *name) {
+ Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
+}
- // LowMem covers as much of the first 4GB as possible.
- const uptr kLowMemEnd = 1UL<<32;
- const uptr kLowShadowEnd = kLowMemEnd >> kShadowScale;
- const uptr kLowShadowStart = kLowShadowEnd >> kShadowScale;
+static void PrintAddressSpaceLayout() {
+ PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
+ if (kHighShadowEnd + 1 < kHighMemStart)
+ PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
+ PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
+ if (SHADOW_OFFSET) {
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
+ PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
+ PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
+ CHECK_EQ(0, kLowMemStart);
+ } else {
+ if (kLowMemEnd + 1 < kHighShadowStart)
+ PrintRange(kLowMemEnd + 1, kHighShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowMemEnd + 1, kHighShadowStart);
+ PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
+ CHECK_EQ(kLowShadowEnd + 1, kLowMemStart);
+ PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
+ PrintRange(0, kLowShadowStart - 1, "ShadowGap");
+ }
+}
+static uptr GetHighMemEnd() {
// HighMem covers the upper part of the address space.
- const uptr kHighShadowEnd = (maxVirtualAddress >> kShadowScale) + 1;
- const uptr kHighShadowStart = Max(kLowMemEnd, kHighShadowEnd >> kShadowScale);
- CHECK(kHighShadowStart < kHighShadowEnd);
-
- const uptr kHighMemStart = kHighShadowStart << kShadowScale;
- CHECK(kHighShadowEnd <= kHighMemStart);
-
- if (Verbosity()) {
- Printf("|| `[%p, %p]` || HighMem ||\n", (void *)kHighMemStart,
- (void *)maxVirtualAddress);
- if (kHighMemStart > kHighShadowEnd)
- Printf("|| `[%p, %p]` || ShadowGap2 ||\n", (void *)kHighShadowEnd,
- (void *)kHighMemStart);
- Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowStart,
- (void *)kHighShadowEnd);
- if (kHighShadowStart > kLowMemEnd)
- Printf("|| `[%p, %p]` || ShadowGap2 ||\n", (void *)kHighShadowEnd,
- (void *)kHighMemStart);
- Printf("|| `[%p, %p]` || LowMem ||\n", (void *)kLowShadowEnd,
- (void *)kLowMemEnd);
- Printf("|| `[%p, %p]` || LowShadow ||\n", (void *)kLowShadowStart,
- (void *)kLowShadowEnd);
- Printf("|| `[%p, %p]` || ShadowGap1 ||\n", (void *)0,
- (void *)kLowShadowStart);
+ uptr max_address = GetMaxUserVirtualAddress();
+ if (SHADOW_OFFSET)
+ // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
+ // properly aligned:
+ max_address |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+ return max_address;
+}
+
+static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
+ // Set the shadow memory address to uninitialized.
+ __hwasan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+ uptr shadow_start = SHADOW_OFFSET;
+ // Detect if a dynamic shadow address must be used and find the available
+ // location when necessary. When dynamic address is used, the macro
+ // kLowShadowBeg expands to __hwasan_shadow_memory_dynamic_address which
+ // was just set to kDefaultShadowSentinel.
+ if (shadow_start == kDefaultShadowSentinel) {
+ __hwasan_shadow_memory_dynamic_address = 0;
+ CHECK_EQ(0, SHADOW_OFFSET);
+ shadow_start = FindDynamicShadowStart(shadow_size_bytes);
}
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __hwasan_shadow_memory_dynamic_address = shadow_start;
+}
- ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd - 1, "low shadow");
- ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd - 1, "high shadow");
- ProtectGap(0, kLowShadowStart);
- if (kHighShadowStart > kLowMemEnd)
- ProtectGap(kLowMemEnd, kHighShadowStart - kLowMemEnd);
- if (kHighMemStart > kHighShadowEnd)
- ProtectGap(kHighShadowEnd, kHighMemStart - kHighShadowEnd);
+bool InitShadow() {
+ // Define the entire memory range.
+ kHighMemEnd = GetHighMemEnd();
+
+ // Determine shadow memory base offset.
+ InitializeShadowBaseAddress(MEM_TO_SHADOW_SIZE(kHighMemEnd));
+
+ // Place the low memory first.
+ if (SHADOW_OFFSET) {
+ kLowMemEnd = SHADOW_OFFSET - 1;
+ kLowMemStart = 0;
+ } else {
+ // LowMem covers as much of the first 4GB as possible.
+ kLowMemEnd = (1UL << 32) - 1;
+ kLowMemStart = MEM_TO_SHADOW(kLowMemEnd) + 1;
+ }
+
+ // Define the low shadow based on the already placed low memory.
+ kLowShadowEnd = MEM_TO_SHADOW(kLowMemEnd);
+ kLowShadowStart = SHADOW_OFFSET ? SHADOW_OFFSET : MEM_TO_SHADOW(kLowMemStart);
+
+ // High shadow takes whatever memory is left up there (making sure it is not
+ // interfering with low memory in the fixed case).
+ kHighShadowEnd = MEM_TO_SHADOW(kHighMemEnd);
+ kHighShadowStart = Max(kLowMemEnd, MEM_TO_SHADOW(kHighShadowEnd)) + 1;
+
+ // High memory starts where allocated shadow allows.
+ kHighMemStart = SHADOW_TO_MEM(kHighShadowStart);
+
+ // Check the sanity of the defined memory ranges (there might be gaps).
+ CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
+ CHECK_GT(kHighMemStart, kHighShadowEnd);
+ CHECK_GT(kHighShadowEnd, kHighShadowStart);
+ CHECK_GT(kHighShadowStart, kLowMemEnd);
+ CHECK_GT(kLowMemEnd, kLowMemStart);
+ CHECK_GT(kLowShadowEnd, kLowShadowStart);
+ if (SHADOW_OFFSET)
+ CHECK_GT(kLowShadowStart, kLowMemEnd);
+ else
+ CHECK_GT(kLowMemEnd, kLowShadowStart);
+
+ if (Verbosity())
+ PrintAddressSpaceLayout();
+
+ // Reserve shadow memory.
+ ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
+ ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");
+
+ // Protect all the gaps.
+ ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
+ if (SHADOW_OFFSET) {
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
+ } else {
+ if (kLowMemEnd + 1 < kHighShadowStart)
+ ProtectGap(kLowMemEnd + 1, kHighShadowStart - kLowMemEnd - 1);
+ }
+ if (kHighShadowEnd + 1 < kHighMemStart)
+ ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);
return true;
}
+bool MemIsApp(uptr p) {
+ CHECK(GetTagFromPointer(p) == 0);
+ return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
+}
+
static void HwasanAtExit(void) {
if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
ReportStats();
@@ -177,50 +281,65 @@ struct AccessInfo {
bool recover;
};
-#if defined(__aarch64__)
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
- // Access type is encoded in HLT immediate as 0x1XY,
- // where X&1 is 1 for store, 0 for load,
- // and X&2 is 1 if the error is recoverable.
- // Valid values of Y are 0 to 4, which are interpreted as log2(access_size),
- // and 0xF, which means that access size is stored in X1 register.
- // Access address is always in X0 register.
- AccessInfo ai;
+ // Access type is passed in a platform dependent way (see below) and encoded
+ // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
+ // recoverable. Valid values of Y are 0 to 4, which are interpreted as
+ // log2(access_size), and 0xF, which means that access size is passed via
+ // platform dependent register (see below).
+#if defined(__aarch64__)
+ // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
+ // access size is stored in X1 register. Access address is always in X0
+ // register.
uptr pc = (uptr)info->si_addr;
- unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
- if ((code & 0xff00) != 0x100)
- return AccessInfo{0, 0, false, false}; // Not ours.
- bool is_store = code & 0x10;
- bool recover = code & 0x20;
- unsigned size_log = code & 0xf;
+ const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
+ if ((code & 0xff00) != 0x900)
+ return AccessInfo{}; // Not ours.
+
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const uptr addr = uc->uc_mcontext.regs[0];
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not ours.
+ const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
+
+#elif defined(__x86_64__)
+ // Access type is encoded in the instruction following INT3 as
+ // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
+ // RSI register. Access address is always in RDI register.
+ uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
+ uint8_t *nop = (uint8_t*)pc;
+ if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
+ *(nop + 3) < 0x40)
+ return AccessInfo{}; // Not ours.
+ const unsigned code = *(nop + 3);
+
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
+ const unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
- return AccessInfo{0, 0, false, false}; // Not ours.
+ return AccessInfo{}; // Not ours.
+ const uptr size =
+ size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
- ai.is_store = is_store;
- ai.is_load = !is_store;
- ai.addr = uc->uc_mcontext.regs[0];
- if (size_log == 0xf)
- ai.size = uc->uc_mcontext.regs[1];
- else
- ai.size = 1U << size_log;
- ai.recover = recover;
- return ai;
-}
#else
-static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
- return AccessInfo{0, 0, false, false};
-}
+# error Unsupported architecture
#endif
-static bool HwasanOnSIGILL(int signo, siginfo_t *info, ucontext_t *uc) {
- SignalContext sig{info, uc};
+ return AccessInfo{addr, size, is_store, !is_store, recover};
+}
+
+static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
AccessInfo ai = GetAccessInfo(info, uc);
if (!ai.is_store && !ai.is_load)
return false;
- InternalScopedBuffer<BufferedStackTrace> stack_buffer(1);
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
BufferedStackTrace *stack = stack_buffer.data();
stack->Reset();
+ SignalContext sig{info, uc};
GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, uc,
common_flags()->fast_unwind_on_fatal);
@@ -230,7 +349,12 @@ static bool HwasanOnSIGILL(int signo, siginfo_t *info, ucontext_t *uc) {
if (flags()->halt_on_error || !ai.recover)
Die();
+#if defined(__aarch64__)
uc->uc_mcontext.pc += 4;
+#elif defined(__x86_64__)
+#else
+# error Unsupported architecture
+#endif
return true;
}
@@ -242,8 +366,8 @@ static void OnStackUnwind(const SignalContext &sig, const void *,
void HwasanOnDeadlySignal(int signo, void *info, void *context) {
// Probably a tag mismatch.
- if (signo == SIGILL)
- if (HwasanOnSIGILL(signo, (siginfo_t *)info, (ucontext_t*)context))
+ if (signo == SIGTRAP)
+ if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context))
return;
HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
diff --git a/lib/hwasan/hwasan_mapping.h b/lib/hwasan/hwasan_mapping.h
new file mode 100644
index 000000000000..650a5aefcb2d
--- /dev/null
+++ b/lib/hwasan/hwasan_mapping.h
@@ -0,0 +1,85 @@
+//===-- hwasan_mapping.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and defines memory mapping.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_MAPPING_H
+#define HWASAN_MAPPING_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+// Typical mapping on Linux/x86_64 with fixed shadow mapping:
+// || [0x080000000000, 0x7fffffffffff] || HighMem ||
+// || [0x008000000000, 0x07ffffffffff] || HighShadow ||
+// || [0x000100000000, 0x007fffffffff] || ShadowGap ||
+// || [0x000010000000, 0x0000ffffffff] || LowMem ||
+// || [0x000001000000, 0x00000fffffff] || LowShadow ||
+// || [0x000000000000, 0x000000ffffff] || ShadowGap ||
+//
+// and with dynamic shadow mapped at [0x770d59f40000, 0x7f0d59f40000]:
+// || [0x7f0d59f40000, 0x7fffffffffff] || HighMem ||
+// || [0x7efe2f934000, 0x7f0d59f3ffff] || HighShadow ||
+// || [0x7e7e2f934000, 0x7efe2f933fff] || ShadowGap ||
+// || [0x770d59f40000, 0x7e7e2f933fff] || LowShadow ||
+// || [0x000000000000, 0x770d59f3ffff] || LowMem ||
+
+// Typical mapping on Android/AArch64 (39-bit VMA):
+// || [0x001000000000, 0x007fffffffff] || HighMem ||
+// || [0x000800000000, 0x000fffffffff] || ShadowGap ||
+// || [0x000100000000, 0x0007ffffffff] || HighShadow ||
+// || [0x000010000000, 0x0000ffffffff] || LowMem ||
+// || [0x000001000000, 0x00000fffffff] || LowShadow ||
+// || [0x000000000000, 0x000000ffffff] || ShadowGap ||
+//
+// and with dynamic shadow mapped: [0x007477480000, 0x007c77480000]:
+// || [0x007c77480000, 0x007fffffffff] || HighMem ||
+// || [0x007c3ebc8000, 0x007c7747ffff] || HighShadow ||
+// || [0x007bbebc8000, 0x007c3ebc7fff] || ShadowGap ||
+// || [0x007477480000, 0x007bbebc7fff] || LowShadow ||
+// || [0x000000000000, 0x00747747ffff] || LowMem ||
+
+static constexpr __sanitizer::u64 kDefaultShadowSentinel = ~(__sanitizer::u64)0;
+
+// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th).
+constexpr __sanitizer::uptr kShadowScale = 4;
+constexpr __sanitizer::uptr kShadowAlignment = 1ULL << kShadowScale;
+
+#if SANITIZER_ANDROID
+# define HWASAN_FIXED_MAPPING 0
+#else
+# define HWASAN_FIXED_MAPPING 1
+#endif
+
+#if HWASAN_FIXED_MAPPING
+# define SHADOW_OFFSET (0)
+# define HWASAN_PREMAP_SHADOW 0
+#else
+# define SHADOW_OFFSET (__hwasan_shadow_memory_dynamic_address)
+# define HWASAN_PREMAP_SHADOW 1
+#endif
+
+#define SHADOW_GRANULARITY (1ULL << kShadowScale)
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem) >> kShadowScale) + SHADOW_OFFSET)
+#define SHADOW_TO_MEM(shadow) (((uptr)(shadow) - SHADOW_OFFSET) << kShadowScale)
+
+#define MEM_TO_SHADOW_SIZE(size) ((uptr)(size) >> kShadowScale)
+
+#define MEM_IS_APP(mem) MemIsApp((uptr)(mem))
+
+namespace __hwasan {
+
+bool MemIsApp(uptr p);
+
+} // namespace __hwasan
+
+#endif // HWASAN_MAPPING_H
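
The mapping above is a direct byte-per-granule shadow: with kShadowScale == 4, each 16-byte granule of application memory is described by one shadow (tag) byte. A quick worked example, assuming the fixed mapping where SHADOW_OFFSET is 0:

    MEM_TO_SHADOW(0x1000) == 0x100
    MEM_TO_SHADOW(0x100f) == 0x100    // same 16-byte granule, same tag byte
    MEM_TO_SHADOW(0x1010) == 0x101    // next granule
    SHADOW_TO_MEM(0x101)  == 0x1010   // inverse, up to granule granularity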
diff --git a/lib/hwasan/hwasan_new_delete.cc b/lib/hwasan/hwasan_new_delete.cc
index 3ccc26734bf6..63ca74edd481 100644
--- a/lib/hwasan/hwasan_new_delete.cc
+++ b/lib/hwasan/hwasan_new_delete.cc
@@ -1,4 +1,4 @@
-//===-- hwasan_new_delete.cc ------------------------------------------------===//
+//===-- hwasan_new_delete.cc ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -15,6 +15,7 @@
#include "hwasan.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
@@ -32,7 +33,7 @@ namespace std {
#define OPERATOR_NEW_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_malloc(size, &stack);\
- if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res
INTERCEPTOR_ATTRIBUTE
diff --git a/lib/hwasan/hwasan_poisoning.cc b/lib/hwasan/hwasan_poisoning.cc
index 411fd05b10d5..b99d8ed0be79 100644
--- a/lib/hwasan/hwasan_poisoning.cc
+++ b/lib/hwasan/hwasan_poisoning.cc
@@ -13,6 +13,7 @@
#include "hwasan_poisoning.h"
+#include "hwasan_mapping.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -22,7 +23,7 @@ uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
CHECK(IsAligned(p, kShadowAlignment));
CHECK(IsAligned(size, kShadowAlignment));
uptr shadow_start = MEM_TO_SHADOW(p);
- uptr shadow_size = MEM_TO_SHADOW_OFFSET(size);
+ uptr shadow_size = MEM_TO_SHADOW_SIZE(size);
internal_memset((void *)shadow_start, tag, shadow_size);
return AddTagToPointer(p, tag);
}
diff --git a/lib/hwasan/hwasan_report.cc b/lib/hwasan/hwasan_report.cc
index a3c6709491d3..16e9016ea35b 100644
--- a/lib/hwasan/hwasan_report.cc
+++ b/lib/hwasan/hwasan_report.cc
@@ -1,4 +1,4 @@
-//===-- hwasan_report.cc ----------------------------------------------------===//
+//===-- hwasan_report.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,6 +14,7 @@
#include "hwasan.h"
#include "hwasan_allocator.h"
+#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -36,9 +37,9 @@ static StackTrace GetStackTraceFromId(u32 id) {
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
- const char *Allocation() { return Magenta(); }
- const char *Origin() { return Magenta(); }
- const char *Name() { return Green(); }
+ const char *Allocation() const { return Magenta(); }
+ const char *Origin() const { return Magenta(); }
+ const char *Name() const { return Green(); }
};
struct HeapAddressDescription {
@@ -129,5 +130,4 @@ void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
ReportErrorSummary("tag-mismatch", stack);
}
-
} // namespace __hwasan
diff --git a/lib/hwasan/hwasan_report.h b/lib/hwasan/hwasan_report.h
new file mode 100644
index 000000000000..bb33f1a87308
--- /dev/null
+++ b/lib/hwasan/hwasan_report.h
@@ -0,0 +1,36 @@
+//===-- hwasan_report.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. HWASan-private header for error
+/// reporting functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_REPORT_H
+#define HWASAN_REPORT_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __hwasan {
+
+void ReportInvalidAccess(StackTrace *stack, u32 origin);
+void ReportStats();
+void ReportInvalidAccessInsideAddressRange(const char *what, const void *start,
+ uptr size, uptr offset);
+void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
+ bool is_store);
+
+void ReportAtExitStatistics();
+
+
+} // namespace __hwasan
+
+#endif // HWASAN_REPORT_H
diff --git a/lib/hwasan/hwasan_thread.cc b/lib/hwasan/hwasan_thread.cc
index d6551ffc6fdc..b50c0fc76be4 100644
--- a/lib/hwasan/hwasan_thread.cc
+++ b/lib/hwasan/hwasan_thread.cc
@@ -1,5 +1,6 @@
#include "hwasan.h"
+#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_poisoning.h"
#include "hwasan_interface_internal.h"
@@ -8,6 +9,19 @@
namespace __hwasan {
+static u32 RandomSeed() {
+ u32 seed;
+ do {
+ if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
+ /*blocking=*/false))) {
+ seed = static_cast<u32>(
+ (NanoTime() >> 12) ^
+ (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
+ }
+ } while (!seed);
+ return seed;
+}
+
HwasanThread *HwasanThread::Create(thread_callback_t start_routine,
void *arg) {
uptr PageSize = GetPageSizeCached();
@@ -16,6 +30,7 @@ HwasanThread *HwasanThread::Create(thread_callback_t start_routine,
thread->start_routine_ = start_routine;
thread->arg_ = arg;
thread->destructor_iterations_ = GetPthreadDestructorIterations();
+ thread->random_state_ = flags()->random_tags ? RandomSeed() : 0;
return thread;
}
@@ -72,4 +87,28 @@ thread_return_t HwasanThread::ThreadStart() {
return res;
}
+static u32 xorshift(u32 state) {
+ state ^= state << 13;
+ state ^= state >> 17;
+ state ^= state << 5;
+ return state;
+}
+
+// Generate a (pseudo-)random non-zero tag.
+tag_t HwasanThread::GenerateRandomTag() {
+ tag_t tag;
+ do {
+ if (flags()->random_tags) {
+ if (!random_buffer_)
+ random_buffer_ = random_state_ = xorshift(random_state_);
+ CHECK(random_buffer_);
+ tag = random_buffer_ & 0xFF;
+ random_buffer_ >>= 8;
+ } else {
+ tag = random_state_ = (random_state_ + 1) & 0xFF;
+ }
+ } while (!tag);
+ return tag;
+}
+
} // namespace __hwasan
diff --git a/lib/hwasan/hwasan_thread.h b/lib/hwasan/hwasan_thread.h
index 96f1bb813adf..1e482adeac84 100644
--- a/lib/hwasan/hwasan_thread.h
+++ b/lib/hwasan/hwasan_thread.h
@@ -52,6 +52,8 @@ class HwasanThread {
HwasanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+ tag_t GenerateRandomTag();
+
int destructor_iterations_;
private:
@@ -70,6 +72,9 @@ class HwasanThread {
unsigned in_symbolizer_;
unsigned in_interceptor_scope_;
+ u32 random_state_;
+ u32 random_buffer_;
+
HwasanThreadLocalMallocStorage malloc_storage_;
};