author    Dimitry Andric <dim@FreeBSD.org>  2018-07-28 11:06:48 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2018-07-28 11:06:48 +0000
commit    93c1b73a09a52d4a265f683bf1954b08bb430049 (patch)
tree      5543464d74945196cc890e9d9099e5d0660df7eb /lib/scudo
parent    0d8e7490d6e8a13a8f0977d9b7771803b9f64ea0 (diff)
download  src-93c1b73a09a52d4a265f683bf1954b08bb430049.tar.gz
          src-93c1b73a09a52d4a265f683bf1954b08bb430049.zip
Vendor import of compiler-rt trunk r338150 (tag: vendor/compiler-rt/compiler-rt-trunk-r338150)
Notes:
    svn path=/vendor/compiler-rt/dist/; revision=336817
    svn path=/vendor/compiler-rt/compiler-rt-trunk-r338150/; revision=336818; tag=vendor/compiler-rt/compiler-rt-trunk-r338150
Diffstat (limited to 'lib/scudo')
-rw-r--r--  lib/scudo/CMakeLists.txt               |  96
-rw-r--r--  lib/scudo/scudo_allocator.cpp          | 495
-rw-r--r--  lib/scudo/scudo_allocator.h            |  36
-rw-r--r--  lib/scudo/scudo_allocator_combined.h   |  15
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h  | 173
-rw-r--r--  lib/scudo/scudo_errors.cpp             |  77
-rw-r--r--  lib/scudo/scudo_errors.h               |  35
-rw-r--r--  lib/scudo/scudo_flags.cpp              |  20
-rw-r--r--  lib/scudo/scudo_interceptors.cpp       |  75
-rw-r--r--  lib/scudo/scudo_interface_internal.h   |  13
-rw-r--r--  lib/scudo/scudo_malloc.cpp             |  85
-rw-r--r--  lib/scudo/scudo_new_delete.cpp         | 108
-rw-r--r--  lib/scudo/scudo_platform.h             |  16
-rw-r--r--  lib/scudo/scudo_termination.cpp        |   2
-rw-r--r--  lib/scudo/scudo_tsd.h                  |  24
-rw-r--r--  lib/scudo/scudo_tsd_exclusive.cpp      |   4
-rw-r--r--  lib/scudo/scudo_tsd_exclusive.inc      |   4
-rw-r--r--  lib/scudo/scudo_tsd_shared.cpp         |  67
-rw-r--r--  lib/scudo/scudo_tsd_shared.inc         |  16
-rw-r--r--  lib/scudo/scudo_utils.cpp              |  21
20 files changed, 883 insertions, 499 deletions
diff --git a/lib/scudo/CMakeLists.txt b/lib/scudo/CMakeLists.txt
index 4d26a3477feb..0646c3dd4f53 100644
--- a/lib/scudo/CMakeLists.txt
+++ b/lib/scudo/CMakeLists.txt
@@ -7,11 +7,41 @@ set(SCUDO_CFLAGS ${SANITIZER_COMMON_CFLAGS})
list(APPEND SCUDO_CFLAGS -fbuiltin)
append_rtti_flag(OFF SCUDO_CFLAGS)
+set(SCUDO_MINIMAL_DYNAMIC_LIBS ${SANITIZER_COMMON_LINK_LIBS})
+append_list_if(COMPILER_RT_HAS_LIBDL dl SCUDO_MINIMAL_DYNAMIC_LIBS)
+append_list_if(COMPILER_RT_HAS_LIBRT rt SCUDO_MINIMAL_DYNAMIC_LIBS)
+append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread SCUDO_MINIMAL_DYNAMIC_LIBS)
+append_list_if(COMPILER_RT_HAS_LIBLOG log SCUDO_MINIMAL_DYNAMIC_LIBS)
+
+set(SCUDO_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS})
+# Use gc-sections by default to avoid unused code being pulled in.
+list(APPEND SCUDO_DYNAMIC_LINK_FLAGS -Wl,--gc-sections)
+
+# The minimal Scudo runtime does not include the UBSan runtime.
+set(SCUDO_MINIMAL_OBJECT_LIBS
+ RTSanitizerCommonNoTermination
+ RTSanitizerCommonLibc
+ RTInterception)
+set(SCUDO_OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS})
+set(SCUDO_DYNAMIC_LIBS ${SCUDO_MINIMAL_DYNAMIC_LIBS})
+
+if (FUCHSIA)
+ list(APPEND SCUDO_CFLAGS -nostdinc++)
+ list(APPEND SCUDO_DYNAMIC_LINK_FLAGS -nostdlib++)
+else()
+ list(APPEND SCUDO_DYNAMIC_LIBS ${SANITIZER_CXX_ABI_LIBRARY})
+ list(APPEND SCUDO_OBJECT_LIBS
+ RTSanitizerCommonCoverage
+ RTSanitizerCommonSymbolizer
+ RTUbsan)
+endif()
+
set(SCUDO_SOURCES
scudo_allocator.cpp
- scudo_flags.cpp
scudo_crc32.cpp
- scudo_interceptors.cpp
+ scudo_errors.cpp
+ scudo_flags.cpp
+ scudo_malloc.cpp
scudo_termination.cpp
scudo_tsd_exclusive.cpp
scudo_tsd_shared.cpp
@@ -20,6 +50,21 @@ set(SCUDO_SOURCES
set(SCUDO_CXX_SOURCES
scudo_new_delete.cpp)
+set(SCUDO_HEADERS
+ scudo_allocator.h
+ scudo_allocator_combined.h
+ scudo_allocator_secondary.h
+ scudo_crc32.h
+ scudo_errors.h
+ scudo_flags.h
+ scudo_flags.inc
+ scudo_interface_internal.h
+ scudo_platform.h
+ scudo_tsd.h
+ scudo_tsd_exclusive.inc
+ scudo_tsd_shared.inc
+ scudo_utils.h)
+
# Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available.
if (COMPILER_RT_HAS_MSSE4_2_FLAG)
set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -msse4.2)
@@ -32,41 +77,58 @@ if (COMPILER_RT_HAS_MCRC_FLAG)
endif()
if(COMPILER_RT_HAS_SCUDO)
- set(SCUDO_DYNAMIC_LIBS ${SANITIZER_COMMON_LINK_LIBS})
- append_list_if(COMPILER_RT_HAS_LIBDL dl SCUDO_DYNAMIC_LIBS)
- append_list_if(COMPILER_RT_HAS_LIBRT rt SCUDO_DYNAMIC_LIBS)
- append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread SCUDO_DYNAMIC_LIBS)
- append_list_if(COMPILER_RT_HAS_LIBLOG log SCUDO_DYNAMIC_LIBS)
+ add_compiler_rt_runtime(clang_rt.scudo_minimal
+ STATIC
+ ARCHS ${SCUDO_SUPPORTED_ARCH}
+ SOURCES ${SCUDO_SOURCES}
+ ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+ OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS}
+ CFLAGS ${SCUDO_CFLAGS}
+ PARENT_TARGET scudo)
+ add_compiler_rt_runtime(clang_rt.scudo_cxx_minimal
+ STATIC
+ ARCHS ${SCUDO_SUPPORTED_ARCH}
+ SOURCES ${SCUDO_CXX_SOURCES}
+ ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+ CFLAGS ${SCUDO_CFLAGS}
+ PARENT_TARGET scudo)
add_compiler_rt_runtime(clang_rt.scudo
STATIC
ARCHS ${SCUDO_SUPPORTED_ARCH}
SOURCES ${SCUDO_SOURCES}
- OBJECT_LIBS RTSanitizerCommonNoTermination
- RTSanitizerCommonLibc
- RTInterception
- RTUbsan
+ ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+ OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
CFLAGS ${SCUDO_CFLAGS}
PARENT_TARGET scudo)
-
add_compiler_rt_runtime(clang_rt.scudo_cxx
STATIC
ARCHS ${SCUDO_SUPPORTED_ARCH}
SOURCES ${SCUDO_CXX_SOURCES}
+ ADDITIONAL_HEADERS ${SCUDO_HEADERS}
OBJECT_LIBS RTUbsan_cxx
CFLAGS ${SCUDO_CFLAGS}
PARENT_TARGET scudo)
+ add_compiler_rt_runtime(clang_rt.scudo_minimal
+ SHARED
+ ARCHS ${SCUDO_SUPPORTED_ARCH}
+ SOURCES ${SCUDO_SOURCES} ${SCUDO_CXX_SOURCES}
+ ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+ OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS}
+ CFLAGS ${SCUDO_CFLAGS}
+ LINK_FLAGS ${SCUDO_DYNAMIC_LINK_FLAGS}
+ LINK_LIBS ${SCUDO_MINIMAL_DYNAMIC_LIBS}
+ PARENT_TARGET scudo)
+
add_compiler_rt_runtime(clang_rt.scudo
SHARED
ARCHS ${SCUDO_SUPPORTED_ARCH}
SOURCES ${SCUDO_SOURCES} ${SCUDO_CXX_SOURCES}
- OBJECT_LIBS RTSanitizerCommonNoTermination
- RTSanitizerCommonLibc
- RTInterception
- RTUbsan
- RTUbsan_cxx
+ ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+ OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
CFLAGS ${SCUDO_CFLAGS}
+ LINK_FLAGS ${SCUDO_DYNAMIC_LINK_FLAGS}
LINK_LIBS ${SCUDO_DYNAMIC_LIBS}
PARENT_TARGET scudo)
endif()
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index e5a4d714c66e..4a11bf5fcc21 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -16,7 +16,9 @@
#include "scudo_allocator.h"
#include "scudo_crc32.h"
+#include "scudo_errors.h"
#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
#include "scudo_tsd.h"
#include "scudo_utils.h"
@@ -60,40 +62,49 @@ INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
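
The computeCRC32 helper above dispatches to the hardware CRC32 instruction when the target supports it. A rough sketch of that dispatch pattern (not Scudo's exact code; the intrinsic is the standard SSE 4.2 one, and the fallback is a plain bit-at-a-time CRC-32C):

```cpp
#include <cstdint>
#if defined(__SSE4_2__)
# include <nmmintrin.h>  // _mm_crc32_u64
#endif

// Bit-at-a-time CRC-32C update; the SSE 4.2 instruction computes the same
// reflected CRC-32C step in hardware.
static uint32_t softwareCRC32(uint32_t Crc, uint64_t Value) {
  for (int I = 0; I < 64; ++I) {
    Crc ^= static_cast<uint32_t>(Value & 1);
    Value >>= 1;
    Crc = (Crc >> 1) ^ (0x82F63B78u & (0u - (Crc & 1)));
  }
  return Crc;
}

uint32_t combineCRC32(uint32_t Crc, uint64_t Value) {
#if defined(__SSE4_2__)
  return static_cast<uint32_t>(_mm_crc32_u64(Crc, Value));
#else
  return softwareCRC32(Crc, Value);
#endif
}
```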
-static ScudoBackendAllocator &getBackendAllocator();
+static BackendT &getBackend();
namespace Chunk {
- // We can't use the offset member of the chunk itself, as we would
- // double-fetch it without any guarantee that it hasn't been tampered
- // with. To prevent this, we work with a local copy of the header.
- static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
- return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
- AlignedChunkHeaderSize -
- (Header->Offset << MinAlignmentLog));
- }
-
static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
- AlignedChunkHeaderSize);
+ getHeaderSize());
}
static INLINE
const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
return reinterpret_cast<const AtomicPackedHeader *>(
- reinterpret_cast<uptr>(Ptr) - AlignedChunkHeaderSize);
+ reinterpret_cast<uptr>(Ptr) - getHeaderSize());
}
static INLINE bool isAligned(const void *Ptr) {
return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
}
+ // We can't use the offset member of the chunk itself, as we would
+ // double-fetch it without any guarantee that it hasn't been tampered
+ // with. To prevent this, we work with a local copy of the header.
+ static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
+ return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize() - (Header->Offset << MinAlignmentLog));
+ }
+
// Returns the usable size for a chunk, meaning the amount of bytes from the
// beginning of the user data to the end of the backend allocated chunk.
static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
- const uptr Size = getBackendAllocator().getActuallyAllocatedSize(
- getBackendPtr(Ptr, Header), Header->ClassId);
- if (Size == 0)
- return 0;
- return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
+ const uptr ClassId = Header->ClassId;
+ if (ClassId)
+ return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
+ (Header->Offset << MinAlignmentLog);
+ return SecondaryT::GetActuallyAllocatedSize(
+ getBackendPtr(Ptr, Header)) - getHeaderSize();
+ }
+
+ // Returns the size the user requested when allocating the chunk.
+ static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
+ const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+ if (Header->ClassId)
+ return SizeOrUnusedBytes;
+ return SecondaryT::GetActuallyAllocatedSize(
+ getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
}
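
The double-fetch comment above describes a small but important pattern: the packed header is loaded atomically exactly once into a local copy, and every later check and field access uses that copy, so a concurrent writer can't change the header between validation and use. A minimal sketch of the idea, with simplified field widths (Scudo's real header packs more state plus a checksum):

```cpp
#include <atomic>
#include <cstdint>
#include <cstring>

// Hypothetical 64-bit packed header; the field layout here is illustrative.
struct UnpackedHdr {
  uint64_t Offset : 16, ClassId : 8, State : 2, Unused : 38;
};
using PackedHdr = uint64_t;

void loadHeaderOnce(const std::atomic<PackedHdr> *AtomicHdr,
                    UnpackedHdr *Local) {
  // One atomic load; all subsequent reads go through the local copy.
  const PackedHdr Packed = AtomicHdr->load(std::memory_order_relaxed);
  std::memcpy(Local, &Packed, sizeof(*Local));
}
```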
// Compute the checksum of the chunk pointer and its header.
@@ -136,9 +147,8 @@ namespace Chunk {
atomic_load_relaxed(getConstAtomicHeader(Ptr));
*NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
if (UNLIKELY(NewUnpackedHeader->Checksum !=
- computeChecksum(Ptr, NewUnpackedHeader))) {
- dieWithMessage("ERROR: corrupted chunk header at address %p\n", Ptr);
- }
+ computeChecksum(Ptr, NewUnpackedHeader)))
+ dieWithMessage("corrupted chunk header at address %p\n", Ptr);
}
// Packs and stores the header, computing the checksum in the process.
@@ -159,14 +169,13 @@ namespace Chunk {
PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
if (UNLIKELY(!atomic_compare_exchange_strong(
getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
- memory_order_relaxed))) {
- dieWithMessage("ERROR: race on chunk header at address %p\n", Ptr);
- }
+ memory_order_relaxed)))
+ dieWithMessage("race on chunk header at address %p\n", Ptr);
}
} // namespace Chunk
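
compareExchangeHeader doubles as a race detector: the whole header fits in one atomic word, so if the word changed between the load and the store, another thread (or an attacker) modified it and the process aborts. The skeleton of the pattern, independent of Scudo's types:

```cpp
#include <atomic>
#include <cstdint>

// Sketch: swap in a new packed header, treating any concurrent modification
// of the word as a fatal race (__builtin_trap stands in for dieWithMessage).
void updateHeaderOrDie(std::atomic<uint64_t> *AtomicHdr, uint64_t Expected,
                       uint64_t Desired) {
  if (!AtomicHdr->compare_exchange_strong(Expected, Desired,
                                          std::memory_order_relaxed))
    __builtin_trap();
}
```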
struct QuarantineCallback {
- explicit QuarantineCallback(AllocatorCache *Cache)
+ explicit QuarantineCallback(AllocatorCacheT *Cache)
: Cache_(Cache) {}
// Chunk recycling function, returns a quarantined chunk to the backend,
@@ -174,53 +183,48 @@ struct QuarantineCallback {
void Recycle(void *Ptr) {
UnpackedHeader Header;
Chunk::loadHeader(Ptr, &Header);
- if (UNLIKELY(Header.State != ChunkQuarantine)) {
- dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
- Ptr);
- }
+ if (UNLIKELY(Header.State != ChunkQuarantine))
+ dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
Chunk::eraseHeader(Ptr);
void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
if (Header.ClassId)
- getBackendAllocator().deallocatePrimary(Cache_, BackendPtr,
- Header.ClassId);
+ getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
else
- getBackendAllocator().deallocateSecondary(BackendPtr);
+ getBackend().deallocateSecondary(BackendPtr);
}
// Internal quarantine allocation and deallocation functions. We first check
// that the batches are indeed serviced by the Primary.
// TODO(kostyak): figure out the best way to protect the batches.
void *Allocate(uptr Size) {
- return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
+ const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+ return getBackend().allocatePrimary(Cache_, BatchClassId);
}
void Deallocate(void *Ptr) {
- getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
+ const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+ getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
}
- AllocatorCache *Cache_;
+ AllocatorCacheT *Cache_;
COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
- const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
};
-typedef Quarantine<QuarantineCallback, void> ScudoQuarantine;
-typedef ScudoQuarantine::Cache ScudoQuarantineCache;
-COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
+typedef Quarantine<QuarantineCallback, void> QuarantineT;
+typedef QuarantineT::Cache QuarantineCacheT;
+COMPILER_CHECK(sizeof(QuarantineCacheT) <=
sizeof(ScudoTSD::QuarantineCachePlaceHolder));
-ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
- return reinterpret_cast<ScudoQuarantineCache *>(
- TSD->QuarantineCachePlaceHolder);
+QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
+ return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
}
-struct ScudoAllocator {
+struct Allocator {
static const uptr MaxAllowedMallocSize =
FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
- typedef ReturnNullOrDieOnFailure FailureHandler;
-
- ScudoBackendAllocator BackendAllocator;
- ScudoQuarantine AllocatorQuarantine;
+ BackendT Backend;
+ QuarantineT Quarantine;
u32 QuarantineChunksUpToSize;
@@ -234,49 +238,16 @@ struct ScudoAllocator {
atomic_uint8_t RssLimitExceeded;
atomic_uint64_t RssLastCheckedAtNS;
- explicit ScudoAllocator(LinkerInitialized)
- : AllocatorQuarantine(LINKER_INITIALIZED) {}
-
- void performSanityChecks() {
- // Verify that the header offset field can hold the maximum offset. In the
- // case of the Secondary allocator, it takes care of alignment and the
- // offset will always be 0. In the case of the Primary, the worst case
- // scenario happens in the last size class, when the backend allocation
- // would already be aligned on the requested alignment, which would happen
- // to be the maximum alignment that would fit in that size class. As a
- // result, the maximum offset will be at most the maximum alignment for the
- // last size class minus the header size, in multiples of MinAlignment.
- UnpackedHeader Header = {};
- const uptr MaxPrimaryAlignment =
- 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
- const uptr MaxOffset =
- (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
- Header.Offset = MaxOffset;
- if (Header.Offset != MaxOffset) {
- dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
- "header\n");
- }
- // Verify that we can fit the maximum size or amount of unused bytes in the
- // header. Given that the Secondary fits the allocation to a page, the worst
- // case scenario happens in the Primary. It will depend on the second to
- // last and last class sizes, as well as the dynamic base for the Primary.
- // The following is an over-approximation that works for our needs.
- const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
- Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
- if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) {
- dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in "
- "the header\n");
- }
+ explicit Allocator(LinkerInitialized)
+ : Quarantine(LINKER_INITIALIZED) {}
- const uptr LargestClassId = SizeClassMap::kLargestClassID;
- Header.ClassId = LargestClassId;
- if (Header.ClassId != LargestClassId) {
- dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n");
- }
- }
+ NOINLINE void performSanityChecks();
void init() {
SanitizerToolName = "Scudo";
+ PrimaryAllocatorName = "ScudoPrimary";
+ SecondaryAllocatorName = "ScudoSecondary";
+
initFlags();
performSanityChecks();
@@ -287,10 +258,10 @@ struct ScudoAllocator {
atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
+ Backend.init(common_flags()->allocator_release_to_os_interval_ms);
HardRssLimitMb = common_flags()->hard_rss_limit_mb;
SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
- AllocatorQuarantine.Init(
+ Quarantine.Init(
static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
@@ -319,62 +290,36 @@ struct ScudoAllocator {
return Chunk::isValid(Ptr);
}
- // Opportunistic RSS limit check. This will update the RSS limit status, if
- // it can, every 100ms, otherwise it will just return the current one.
- bool isRssLimitExceeded() {
- u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
- const u64 CurrentCheck = MonotonicNanoTime();
- if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
- return atomic_load_relaxed(&RssLimitExceeded);
- if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
- CurrentCheck, memory_order_relaxed))
- return atomic_load_relaxed(&RssLimitExceeded);
- // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
- // RSS from /proc/self/statm by default. We might want to
- // call getrusage directly, even if it's less accurate.
- const uptr CurrentRssMb = GetRSS() >> 20;
- if (HardRssLimitMb && HardRssLimitMb < CurrentRssMb) {
- Report("%s: hard RSS limit exhausted (%zdMb vs %zdMb)\n",
- SanitizerToolName, HardRssLimitMb, CurrentRssMb);
- DumpProcessMap();
- Die();
- }
- if (SoftRssLimitMb) {
- if (atomic_load_relaxed(&RssLimitExceeded)) {
- if (CurrentRssMb <= SoftRssLimitMb)
- atomic_store_relaxed(&RssLimitExceeded, false);
- } else {
- if (CurrentRssMb > SoftRssLimitMb) {
- atomic_store_relaxed(&RssLimitExceeded, true);
- Report("%s: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
- SanitizerToolName, SoftRssLimitMb, CurrentRssMb);
- }
- }
- }
- return atomic_load_relaxed(&RssLimitExceeded);
- }
+ NOINLINE bool isRssLimitExceeded();
// Allocates a chunk.
void *allocate(uptr Size, uptr Alignment, AllocType Type,
bool ForceZeroContents = false) {
initThreadMaybe();
- if (UNLIKELY(Alignment > MaxAlignment))
- return FailureHandler::OnBadRequest();
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
+ }
if (UNLIKELY(Alignment < MinAlignment))
Alignment = MinAlignment;
- if (UNLIKELY(Size >= MaxAllowedMallocSize))
- return FailureHandler::OnBadRequest();
- if (UNLIKELY(Size == 0))
- Size = 1;
- uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
- uptr AlignedSize = (Alignment > MinAlignment) ?
- NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
- if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
- return FailureHandler::OnBadRequest();
+ const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
+ Chunk::getHeaderSize();
+ const uptr AlignedSize = (Alignment > MinAlignment) ?
+ NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
+ if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
+ UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
+ }
- if (CheckRssLimit && UNLIKELY(isRssLimitExceeded()))
- return FailureHandler::OnOOM();
+ if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportRssLimitExceeded();
+ }
// Primary and Secondary backed allocations have a different treatment. We
// deal with alignment requirements of Primary serviced allocations here,
@@ -382,27 +327,32 @@ struct ScudoAllocator {
void *BackendPtr;
uptr BackendSize;
u8 ClassId;
- if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) {
+ if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
BackendSize = AlignedSize;
ClassId = SizeClassMap::ClassID(BackendSize);
- ScudoTSD *TSD = getTSDAndLock();
- BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
- TSD->unlock();
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
} else {
BackendSize = NeededSize;
ClassId = 0;
- BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment);
+ BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
+ }
+ if (UNLIKELY(!BackendPtr)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportOutOfMemory(Size);
}
- if (UNLIKELY(!BackendPtr))
- return FailureHandler::OnOOM();
// If requested, we will zero out the entire contents of the returned chunk.
if ((ForceZeroContents || ZeroContents) && ClassId)
- memset(BackendPtr, 0,
- BackendAllocator.getActuallyAllocatedSize(BackendPtr, ClassId));
+ memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
UnpackedHeader Header = {};
- uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + AlignedChunkHeaderSize;
+ uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
// Since the Secondary takes care of alignment, a non-aligned pointer
// means it is from the Primary. It is also the only case where the offset
@@ -412,7 +362,7 @@ struct ScudoAllocator {
Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
UserPtr = AlignedUserPtr;
}
- CHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
+ DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
Header.State = ChunkAllocated;
Header.AllocType = Type;
if (ClassId) {
@@ -429,7 +379,8 @@ struct ScudoAllocator {
}
void *Ptr = reinterpret_cast<void *>(UserPtr);
Chunk::storeHeader(Ptr, &Header);
- // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(Ptr, Size);
+ if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
+ __sanitizer_malloc_hook(Ptr, Size);
return Ptr;
}
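
The size and offset arithmetic in allocate() is easier to follow with concrete numbers. A self-contained walk-through, assuming an illustrative 16-byte header with MinAlignment = 16 (MinAlignmentLog = 4) and a request of 20 bytes aligned to 64:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t HeaderSize = 16, MinAlignment = 16, MinAlignmentLog = 4;
  const uint64_t Size = 20, Alignment = 64;
  // Round the request up to MinAlignment, then add room for the header.
  const uint64_t NeededSize =
      ((Size + MinAlignment - 1) & ~(MinAlignment - 1)) + HeaderSize;  // 48
  // Over-allocate so some pointer inside the block can be 64-aligned.
  const uint64_t AlignedSize = NeededSize + (Alignment - HeaderSize);  // 96
  // Say the backend returns BackendPtr = 0x1000.
  const uint64_t BackendPtr = 0x1000;
  uint64_t UserPtr = BackendPtr + HeaderSize;                          // 0x1010
  const uint64_t AlignedUserPtr =
      (UserPtr + Alignment - 1) & ~(Alignment - 1);                    // 0x1040
  // The header stores the slide in MinAlignment units: (0x1040-0x1010)>>4 = 3.
  const uint64_t Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
  assert(NeededSize == 48 && AlignedSize == 96 && Offset == 3);
  return 0;
}
```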
@@ -438,18 +389,20 @@ struct ScudoAllocator {
// quarantine chunk size threshold.
void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
uptr Size) {
- const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
+ const bool BypassQuarantine = (Quarantine.GetCacheSize() == 0) ||
(Size > QuarantineChunksUpToSize);
if (BypassQuarantine) {
Chunk::eraseHeader(Ptr);
void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
if (Header->ClassId) {
- ScudoTSD *TSD = getTSDAndLock();
- getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr,
- Header->ClassId);
- TSD->unlock();
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
+ Header->ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
} else {
- getBackendAllocator().deallocateSecondary(BackendPtr);
+ getBackend().deallocateSecondary(BackendPtr);
}
} else {
// If a small memory amount was allocated with a larger alignment, we want
@@ -457,21 +410,23 @@ struct ScudoAllocator {
// with tiny chunks, taking a lot of VA memory. This is an approximation
// of the usable size, that allows us to not call
// GetActuallyAllocatedSize.
- uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
+ const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
UnpackedHeader NewHeader = *Header;
NewHeader.State = ChunkQuarantine;
Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
- ScudoTSD *TSD = getTSDAndLock();
- AllocatorQuarantine.Put(getQuarantineCache(TSD),
- QuarantineCallback(&TSD->Cache), Ptr,
- EstimatedSize);
- TSD->unlock();
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
+ Ptr, EstimatedSize);
+ if (UnlockRequired)
+ TSD->unlock();
}
}
// Deallocates a Chunk, which means either adding it to the quarantine or
// directly returning it to the backend if criteria are met.
- void deallocate(void *Ptr, uptr DeleteSize, AllocType Type) {
+ void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
+ AllocType Type) {
// For a deallocation, we only ensure minimal initialization, meaning thread
// local data will be left uninitialized for now (when using ELF TLS). The
// fallback cache will be used instead. This is a workaround for a situation
// where the only heap operation in a thread happens within one of
// the TLS destructors, ending up in initialized thread specific data never
// the TLS destructors, ending up in initialized thread specific data never
// being destroyed properly. Any other heap operation will do a full init.
initThreadMaybe(/*MinimalInit=*/true);
- // if (&__sanitizer_free_hook) __sanitizer_free_hook(Ptr);
+ if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
+ __sanitizer_free_hook(Ptr);
if (UNLIKELY(!Ptr))
return;
- if (UNLIKELY(!Chunk::isAligned(Ptr))) {
- dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
- "aligned at address %p\n", Ptr);
- }
+ if (UNLIKELY(!Chunk::isAligned(Ptr)))
+ dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
UnpackedHeader Header;
Chunk::loadHeader(Ptr, &Header);
- if (UNLIKELY(Header.State != ChunkAllocated)) {
- dieWithMessage("ERROR: invalid chunk state when deallocating address "
- "%p\n", Ptr);
- }
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
if (DeallocationTypeMismatch) {
// The deallocation type has to match the allocation one.
if (Header.AllocType != Type) {
// With the exception of memalign'd Chunks, which can still be free'd.
- if (Header.AllocType != FromMemalign || Type != FromMalloc) {
- dieWithMessage("ERROR: allocation type mismatch when deallocating "
- "address %p\n", Ptr);
- }
+ if (Header.AllocType != FromMemalign || Type != FromMalloc)
+ dieWithMessage("allocation type mismatch when deallocating address "
+ "%p\n", Ptr);
}
}
- uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
- Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes;
+ const uptr Size = Chunk::getSize(Ptr, &Header);
if (DeleteSizeMismatch) {
- if (DeleteSize && DeleteSize != Size) {
- dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
+ if (DeleteSize && DeleteSize != Size)
+ dieWithMessage("invalid sized delete when deallocating address %p\n",
Ptr);
- }
}
+ (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
quarantineOrDeallocateChunk(Ptr, &Header, Size);
}
@@ -517,21 +467,18 @@ struct ScudoAllocator {
// size still fits in the chunk.
void *reallocate(void *OldPtr, uptr NewSize) {
initThreadMaybe();
- if (UNLIKELY(!Chunk::isAligned(OldPtr))) {
- dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
- "aligned at address %p\n", OldPtr);
- }
+ if (UNLIKELY(!Chunk::isAligned(OldPtr)))
+ dieWithMessage("misaligned address when reallocating address %p\n",
+ OldPtr);
UnpackedHeader OldHeader;
Chunk::loadHeader(OldPtr, &OldHeader);
- if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
- dieWithMessage("ERROR: invalid chunk state when reallocating address "
- "%p\n", OldPtr);
- }
+ if (UNLIKELY(OldHeader.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when reallocating address %p\n",
+ OldPtr);
if (DeallocationTypeMismatch) {
- if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
- dieWithMessage("ERROR: allocation type mismatch when reallocating "
- "address %p\n", OldPtr);
- }
+ if (UNLIKELY(OldHeader.AllocType != FromMalloc))
+ dieWithMessage("allocation type mismatch when reallocating address "
+ "%p\n", OldPtr);
}
const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
// The new size still fits in the current chunk, and the size difference
@@ -548,7 +495,7 @@ struct ScudoAllocator {
// old one.
void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
if (NewPtr) {
- uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
+ const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
UsableSize - OldHeader.SizeOrUnusedBytes;
memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
@@ -564,36 +511,36 @@ struct ScudoAllocator {
UnpackedHeader Header;
Chunk::loadHeader(Ptr, &Header);
// Getting the usable size of a chunk only makes sense if it's allocated.
- if (UNLIKELY(Header.State != ChunkAllocated)) {
- dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
- Ptr);
- }
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
return Chunk::getUsableSize(Ptr, &Header);
}
void *calloc(uptr NMemB, uptr Size) {
initThreadMaybe();
- if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
- return FailureHandler::OnBadRequest();
+ if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportCallocOverflow(NMemB, Size);
+ }
return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
}
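
CheckForCallocOverflow guards the NMemB * Size multiplication before it is performed. A sketch of the usual way such a check is written (the idea, not necessarily sanitizer_common's exact code):

```cpp
#include <cstdint>

// Returns true when Count * Size would not fit in 64 bits.
bool callocWouldOverflow(uint64_t Count, uint64_t Size) {
  if (Size == 0)
    return false;                    // zero bytes can never overflow
  return Count > UINT64_MAX / Size;  // i.e. Count * Size > UINT64_MAX
}
```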
void commitBack(ScudoTSD *TSD) {
- AllocatorQuarantine.Drain(getQuarantineCache(TSD),
- QuarantineCallback(&TSD->Cache));
- BackendAllocator.destroyCache(&TSD->Cache);
+ Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
+ Backend.destroyCache(&TSD->Cache);
}
uptr getStats(AllocatorStat StatType) {
initThreadMaybe();
uptr stats[AllocatorStatCount];
- BackendAllocator.getStats(stats);
+ Backend.getStats(stats);
return stats[StatType];
}
- void *handleBadRequest() {
+ bool canReturnNull() {
initThreadMaybe();
- return FailureHandler::OnBadRequest();
+ return AllocatorMayReturnNull();
}
void setRssLimit(uptr LimitMb, bool HardLimit) {
@@ -603,21 +550,90 @@ struct ScudoAllocator {
SoftRssLimitMb = LimitMb;
CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
}
+
+ void printStats() {
+ initThreadMaybe();
+ Backend.printStats();
+ }
};
-static ScudoAllocator Instance(LINKER_INITIALIZED);
+NOINLINE void Allocator::performSanityChecks() {
+ // Verify that the header offset field can hold the maximum offset. In the
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be 0. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
+ UnpackedHeader Header = {};
+ const uptr MaxPrimaryAlignment =
+ 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
+ const uptr MaxOffset =
+ (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+ Header.Offset = MaxOffset;
+ if (Header.Offset != MaxOffset)
+ dieWithMessage("maximum possible offset doesn't fit in header\n");
+ // Verify that we can fit the maximum size or amount of unused bytes in the
+ // header. Given that the Secondary fits the allocation to a page, the worst
+ // case scenario happens in the Primary. It will depend on the second to
+ // last and last class sizes, as well as the dynamic base for the Primary.
+ // The following is an over-approximation that works for our needs.
+ const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
+ Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
+ if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
+ dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
+
+ const uptr LargestClassId = SizeClassMap::kLargestClassID;
+ Header.ClassId = LargestClassId;
+ if (Header.ClassId != LargestClassId)
+ dieWithMessage("largest class ID doesn't fit in header\n");
+}
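
performSanityChecks leans on a basic bitfield property: assigning a value wider than the field silently truncates, so writing the largest value the design requires and reading it back catches a header layout that is too narrow, at startup and before any allocation. In miniature:

```cpp
#include <cstdio>

struct Hdr {
  unsigned Offset : 4;  // hypothetical 4-bit field
};

int main() {
  Hdr H = {};
  const unsigned MaxOffset = 15;  // largest value the design must store
  H.Offset = MaxOffset;
  if (H.Offset != MaxOffset)      // would trigger if the field were narrower
    std::puts("maximum possible offset doesn't fit in header");
  return 0;
}
```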
-static ScudoBackendAllocator &getBackendAllocator() {
- return Instance.BackendAllocator;
+// Opportunistic RSS limit check. This will update the RSS limit status, if
+// it can, every 100ms, otherwise it will just return the current one.
+NOINLINE bool Allocator::isRssLimitExceeded() {
+ u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
+ const u64 CurrentCheck = MonotonicNanoTime();
+ if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
+ return atomic_load_relaxed(&RssLimitExceeded);
+ if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
+ CurrentCheck, memory_order_relaxed))
+ return atomic_load_relaxed(&RssLimitExceeded);
+ // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
+ // RSS from /proc/self/statm by default. We might want to
+ // call getrusage directly, even if it's less accurate.
+ const uptr CurrentRssMb = GetRSS() >> 20;
+ if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
+ dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
+ HardRssLimitMb, CurrentRssMb);
+ if (SoftRssLimitMb) {
+ if (atomic_load_relaxed(&RssLimitExceeded)) {
+ if (CurrentRssMb <= SoftRssLimitMb)
+ atomic_store_relaxed(&RssLimitExceeded, false);
+ } else {
+ if (CurrentRssMb > SoftRssLimitMb) {
+ atomic_store_relaxed(&RssLimitExceeded, true);
+ Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
+ SoftRssLimitMb, CurrentRssMb);
+ }
+ }
+ }
+ return atomic_load_relaxed(&RssLimitExceeded);
+}
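
The function above is a lock-free rate limiter: at most one thread per 100ms window wins the compare-exchange on the timestamp and pays for the expensive RSS read, while everyone else returns the cached flag. The skeleton of the pattern, detached from Scudo's types:

```cpp
#include <atomic>
#include <chrono>
#include <cstdint>

static std::atomic<uint64_t> LastCheckNS{0};
static std::atomic<bool> CachedResult{false};

static bool expensiveCheck() {
  return false;  // stand-in for e.g. reading RSS from /proc/self/statm
}

bool rateLimitedCheck() {
  const uint64_t Now = std::chrono::duration_cast<std::chrono::nanoseconds>(
      std::chrono::steady_clock::now().time_since_epoch()).count();
  uint64_t Last = LastCheckNS.load(std::memory_order_relaxed);
  if (Now < Last + 100ULL * 1000000ULL)  // still within the 100ms window
    return CachedResult.load(std::memory_order_relaxed);
  // Only the thread that wins the CAS refreshes the cached value.
  if (!LastCheckNS.compare_exchange_weak(Last, Now, std::memory_order_relaxed))
    return CachedResult.load(std::memory_order_relaxed);
  CachedResult.store(expensiveCheck(), std::memory_order_relaxed);
  return CachedResult.load(std::memory_order_relaxed);
}
```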
+
+static Allocator Instance(LINKER_INITIALIZED);
+
+static BackendT &getBackend() {
+ return Instance.Backend;
}
void initScudo() {
Instance.init();
}
-void ScudoTSD::init(bool Shared) {
- UnlockRequired = Shared;
- getBackendAllocator().initCache(&Cache);
+void ScudoTSD::init() {
+ getBackend().initCache(&Cache);
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}
@@ -625,23 +641,25 @@ void ScudoTSD::commitBack() {
Instance.commitBack(this);
}
-void *scudoMalloc(uptr Size, AllocType Type) {
- return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
-}
-
-void scudoFree(void *Ptr, AllocType Type) {
- Instance.deallocate(Ptr, 0, Type);
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
+ if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
+ errno = EINVAL;
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportAllocationAlignmentNotPowerOfTwo(Alignment);
+ }
+ return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
}
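
The power-of-two test used here (an Alignment of 0 means "default" and is allowed through) is the classic single-bit trick: a power of two has exactly one bit set, so clearing the lowest set bit must leave zero. For reference:

```cpp
#include <cassert>
#include <cstdint>

bool isPowerOfTwo(uint64_t X) {
  return X != 0 && (X & (X - 1)) == 0;  // exactly one bit set
}

int main() {
  assert(isPowerOfTwo(16) && !isPowerOfTwo(24) && !isPowerOfTwo(0));
  return 0;
}
```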
-void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
- Instance.deallocate(Ptr, Size, Type);
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
+ Instance.deallocate(Ptr, Size, Alignment, Type);
}
void *scudoRealloc(void *Ptr, uptr Size) {
if (!Ptr)
return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
if (Size == 0) {
- Instance.deallocate(Ptr, 0, FromMalloc);
+ Instance.deallocate(Ptr, 0, 0, FromMalloc);
return nullptr;
}
return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
@@ -660,24 +678,19 @@ void *scudoPvalloc(uptr Size) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
errno = ENOMEM;
- return Instance.handleBadRequest();
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportPvallocOverflow(Size);
}
// pvalloc(0) should allocate one page.
Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}
-void *scudoMemalign(uptr Alignment, uptr Size) {
- if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
- errno = EINVAL;
- return Instance.handleBadRequest();
- }
- return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
-}
-
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
- Instance.handleBadRequest();
+ if (!Instance.canReturnNull())
+ reportInvalidPosixMemalignAlignment(Alignment);
return EINVAL;
}
void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
@@ -690,7 +703,9 @@ int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
errno = EINVAL;
- return Instance.handleBadRequest();
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportInvalidAlignedAllocAlignment(Size, Alignment);
}
return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}
@@ -721,8 +736,8 @@ uptr __sanitizer_get_unmapped_bytes() {
return 1;
}
-uptr __sanitizer_get_estimated_allocated_size(uptr size) {
- return size;
+uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
+ return Size;
}
int __sanitizer_get_ownership(const void *Ptr) {
@@ -733,12 +748,26 @@ uptr __sanitizer_get_allocated_size(const void *Ptr) {
return Instance.getUsableSize(Ptr);
}
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
+ void *Ptr, uptr Size) {
+ (void)Ptr;
+ (void)Size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
+ (void)Ptr;
+}
+#endif
+
// Interface functions
-extern "C" {
-void __scudo_set_rss_limit(unsigned long LimitMb, int HardLimit) { // NOLINT
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
return;
Instance.setRssLimit(LimitMb, !!HardLimit);
}
-} // extern "C"
+
+void __scudo_print_stats() {
+ Instance.printStats();
+}
diff --git a/lib/scudo/scudo_allocator.h b/lib/scudo/scudo_allocator.h
index a561247def9c..0002b4a44b78 100644
--- a/lib/scudo/scudo_allocator.h
+++ b/lib/scudo/scudo_allocator.h
@@ -59,9 +59,17 @@ const uptr MaxAlignmentLog = 24; // 16 MB
const uptr MinAlignment = 1 << MinAlignmentLog;
const uptr MaxAlignment = 1 << MaxAlignmentLog;
-const uptr ChunkHeaderSize = sizeof(PackedHeader);
-const uptr AlignedChunkHeaderSize =
- (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
+// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
+// This way we can use it in constexpr variables and functions declarations.
+constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
+ return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
+namespace Chunk {
+ constexpr uptr getHeaderSize() {
+ return RoundUpTo(sizeof(PackedHeader), MinAlignment);
+ }
+}
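
Because this RoundUpTo is constexpr, Chunk::getHeaderSize() folds to a compile-time constant and can feed static assertions. A couple of worked values (the boundary must be a power of two for the mask trick to hold):

```cpp
#include <cstdint>

constexpr uint64_t RoundUpToExample(uint64_t Size, uint64_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

static_assert(RoundUpToExample(8, 16) == 16,
              "an 8-byte packed header pads out to one 16-byte unit");
static_assert(RoundUpToExample(20, 16) == 32,
              "sizes always round up to the next boundary");
```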
#if SANITIZER_CAN_USE_ALLOCATOR64
const uptr AllocatorSpace = ~0ULL;
@@ -74,7 +82,7 @@ struct AP64 {
static const uptr kFlags =
SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
};
-typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+typedef SizeClassAllocator64<AP64> PrimaryT;
#else
static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
# if SANITIZER_WORDSIZE == 32
@@ -94,30 +102,22 @@ struct AP32 {
SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
};
-typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+typedef SizeClassAllocator32<AP32> PrimaryT;
#endif // SANITIZER_CAN_USE_ALLOCATOR64
-// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
-INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
- return (Size + Boundary - 1) & ~(Boundary - 1);
-}
-
#include "scudo_allocator_secondary.h"
#include "scudo_allocator_combined.h"
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef ScudoLargeMmapAllocator SecondaryAllocator;
-typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> ScudoBackendAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryT> AllocatorCacheT;
+typedef LargeMmapAllocator SecondaryT;
+typedef CombinedAllocator<PrimaryT, AllocatorCacheT, SecondaryT> BackendT;
void initScudo();
-void *scudoMalloc(uptr Size, AllocType Type);
-void scudoFree(void *Ptr, AllocType Type);
-void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type);
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type);
void *scudoRealloc(void *Ptr, uptr Size);
void *scudoCalloc(uptr NMemB, uptr Size);
-void *scudoMemalign(uptr Alignment, uptr Size);
void *scudoValloc(uptr Size);
void *scudoPvalloc(uptr Size);
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
diff --git a/lib/scudo/scudo_allocator_combined.h b/lib/scudo/scudo_allocator_combined.h
index 25e273114c23..6e40660ba9ab 100644
--- a/lib/scudo/scudo_allocator_combined.h
+++ b/lib/scudo/scudo_allocator_combined.h
@@ -16,12 +16,12 @@
#define SCUDO_ALLOCATOR_COMBINED_H_
#ifndef SCUDO_ALLOCATOR_H_
-#error "This file must be included inside scudo_allocator.h."
+# error "This file must be included inside scudo_allocator.h."
#endif
template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator>
-class ScudoCombinedAllocator {
+class CombinedAllocator {
public:
void init(s32 ReleaseToOSIntervalMs) {
Primary.Init(ReleaseToOSIntervalMs);
@@ -49,12 +49,6 @@ class ScudoCombinedAllocator {
Secondary.Deallocate(&Stats, Ptr);
}
- uptr getActuallyAllocatedSize(void *Ptr, uptr ClassId) {
- if (ClassId)
- return PrimaryAllocator::ClassIdToSize(ClassId);
- return Secondary.GetActuallyAllocatedSize(Ptr);
- }
-
void initCache(AllocatorCache *Cache) {
Cache->Init(&Stats);
}
@@ -67,6 +61,11 @@ class ScudoCombinedAllocator {
Stats.Get(StatType);
}
+ void printStats() {
+ Primary.PrintStats();
+ Secondary.PrintStats();
+ }
+
private:
PrimaryAllocator Primary;
SecondaryAllocator Secondary;
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index f2002ed986c3..ff6246e25883 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -21,120 +21,173 @@
# error "This file must be included inside scudo_allocator.h."
#endif
-class ScudoLargeMmapAllocator {
+// Secondary backed allocations are standalone chunks that contain extra
+// information stored in a LargeChunk::Header prior to the frontend's header.
+//
+// The secondary takes care of alignment requirements (so that it can release
+// unnecessary pages in the rare event of larger alignments), and as such must
+// know about the frontend's header size.
+//
+// Since Windows doesn't support partial releasing of a reserved memory region,
+// we have to keep track of both the reserved and the committed memory.
+//
+// The resulting chunk resembles the following:
+//
+// +--------------------+
+// | Guard page(s) |
+// +--------------------+
+// | Unused space* |
+// +--------------------+
+// | LargeChunk::Header |
+// +--------------------+
+// | {Unp,P}ackedHeader |
+// +--------------------+
+// | Data (aligned) |
+// +--------------------+
+// | Unused space** |
+// +--------------------+
+// | Guard page(s) |
+// +--------------------+
+
+namespace LargeChunk {
+ struct Header {
+ ReservedAddressRange StoredRange;
+ uptr CommittedSize;
+ uptr Size;
+ };
+ constexpr uptr getHeaderSize() {
+ return RoundUpTo(sizeof(Header), MinAlignment);
+ }
+ static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+ }
+ static Header *getHeader(const void *Ptr) {
+ return getHeader(reinterpret_cast<uptr>(Ptr));
+ }
+} // namespace LargeChunk
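
Reading the diagram back from a user pointer: the frontend's packed header sits immediately below the user data, and LargeChunk::Header immediately below that. A sketch of the pointer arithmetic with illustrative 16-byte header sizes (the real sizes come from the two getHeaderSize() functions):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t FrontendHeaderSize = 16, LargeHeaderSize = 16;  // illustrative
  const uint64_t UserBeg = 0x7000;  // aligned user data
  // Ptr is what the frontend hands to the secondary's bookkeeping.
  const uint64_t Ptr = UserBeg - FrontendHeaderSize;             // 0x6FF0
  const uint64_t LargeHdr = Ptr - LargeHeaderSize;               // 0x6FE0
  // Both headers together are what Allocate() calls HeadersSize.
  assert(UserBeg - LargeHdr == FrontendHeaderSize + LargeHeaderSize);
  return 0;
}
```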
+
+class LargeMmapAllocator {
public:
void Init() {
- PageSizeCached = GetPageSizeCached();
+ internal_memset(this, 0, sizeof(*this));
}
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
- const uptr UserSize = Size - AlignedChunkHeaderSize;
+ const uptr UserSize = Size - Chunk::getHeaderSize();
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
- uptr MapSize = Size + AlignedReservedAddressRangeSize;
- if (Alignment > MinAlignment)
- MapSize += Alignment;
- const uptr PageSize = PageSizeCached;
- MapSize = RoundUpTo(MapSize, PageSize);
+ uptr ReservedSize = Size + LargeChunk::getHeaderSize();
+ if (UNLIKELY(Alignment > MinAlignment))
+ ReservedSize += Alignment;
+ const uptr PageSize = GetPageSizeCached();
+ ReservedSize = RoundUpTo(ReservedSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
- MapSize += 2 * PageSize;
+ ReservedSize += 2 * PageSize;
ReservedAddressRange AddressRange;
- uptr MapBeg = AddressRange.Init(MapSize);
- if (MapBeg == ~static_cast<uptr>(0))
- return ReturnNullOrDieOnFailure::OnOOM();
+ uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
+ if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
+ return nullptr;
// A page-aligned pointer is assumed after that, so check it now.
- CHECK(IsAligned(MapBeg, PageSize));
- uptr MapEnd = MapBeg + MapSize;
+ DCHECK(IsAligned(ReservedBeg, PageSize));
+ uptr ReservedEnd = ReservedBeg + ReservedSize;
// The beginning of the user area for that allocation comes after the
// initial guard page, and both headers. This is the pointer that has to
// abide by alignment requirements.
- uptr UserBeg = MapBeg + PageSize + HeadersSize;
+ uptr CommittedBeg = ReservedBeg + PageSize;
+ uptr UserBeg = CommittedBeg + HeadersSize;
uptr UserEnd = UserBeg + UserSize;
+ uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
// In the rare event of larger alignments, we will attempt to fit the mmap
// area better and unmap extraneous memory. This will also ensure that the
// offset and unused bytes field of the header stay small.
- if (Alignment > MinAlignment) {
+ if (UNLIKELY(Alignment > MinAlignment)) {
if (!IsAligned(UserBeg, Alignment)) {
UserBeg = RoundUpTo(UserBeg, Alignment);
- CHECK_GE(UserBeg, MapBeg);
- uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
- PageSize;
- CHECK_GE(NewMapBeg, MapBeg);
- if (NewMapBeg != MapBeg) {
- AddressRange.Unmap(MapBeg, NewMapBeg - MapBeg);
- MapBeg = NewMapBeg;
+ CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
+ const uptr NewReservedBeg = CommittedBeg - PageSize;
+ DCHECK_GE(NewReservedBeg, ReservedBeg);
+ if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
+ AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
+ ReservedBeg = NewReservedBeg;
}
UserEnd = UserBeg + UserSize;
+ CommittedEnd = RoundUpTo(UserEnd, PageSize);
}
- uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
- if (NewMapEnd != MapEnd) {
- AddressRange.Unmap(NewMapEnd, MapEnd - NewMapEnd);
- MapEnd = NewMapEnd;
+ const uptr NewReservedEnd = CommittedEnd + PageSize;
+ DCHECK_LE(NewReservedEnd, ReservedEnd);
+ if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
+ AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
+ ReservedEnd = NewReservedEnd;
}
- MapSize = MapEnd - MapBeg;
}
- CHECK_LE(UserEnd, MapEnd - PageSize);
- // Actually mmap the memory, preserving the guard pages on either side
- CHECK_EQ(MapBeg + PageSize,
- AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
- const uptr Ptr = UserBeg - AlignedChunkHeaderSize;
- ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
- *StoredRange = AddressRange;
+ DCHECK_LE(UserEnd, CommittedEnd);
+ const uptr CommittedSize = CommittedEnd - CommittedBeg;
+ // Actually mmap the memory, preserving the guard pages on either side.
+ CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
+ const uptr Ptr = UserBeg - Chunk::getHeaderSize();
+ LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
+ H->StoredRange = AddressRange;
+ H->Size = CommittedEnd - Ptr;
+ H->CommittedSize = CommittedSize;
// The primary adds the whole class size to the stats when allocating a
// chunk, so we will do something similar here. But we will not account for
// the guard pages.
{
SpinMutexLock l(&StatsMutex);
- Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
- Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
+ Stats->Add(AllocatorStatAllocated, CommittedSize);
+ Stats->Add(AllocatorStatMapped, CommittedSize);
+ AllocatedBytes += CommittedSize;
+ if (LargestSize < CommittedSize)
+ LargestSize = CommittedSize;
+ NumberOfAllocs++;
}
return reinterpret_cast<void *>(Ptr);
}
void Deallocate(AllocatorStats *Stats, void *Ptr) {
+ LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
// Since we're unmapping the entirety of where the ReservedAddressRange
// actually is, copy onto the stack.
- const uptr PageSize = PageSizeCached;
- ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr);
+ ReservedAddressRange AddressRange = H->StoredRange;
+ const uptr Size = H->CommittedSize;
{
SpinMutexLock l(&StatsMutex);
- Stats->Sub(AllocatorStatAllocated, AddressRange.size() - 2 * PageSize);
- Stats->Sub(AllocatorStatMapped, AddressRange.size() - 2 * PageSize);
+ Stats->Sub(AllocatorStatAllocated, Size);
+ Stats->Sub(AllocatorStatMapped, Size);
+ FreedBytes += Size;
+ NumberOfFrees++;
}
AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
AddressRange.size());
}
- uptr GetActuallyAllocatedSize(void *Ptr) {
- ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
- // Deduct PageSize as ReservedAddressRange size includes the trailing guard
- // page.
- uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) +
- StoredRange->size() - PageSizeCached;
- return MapEnd - reinterpret_cast<uptr>(Ptr);
+ static uptr GetActuallyAllocatedSize(void *Ptr) {
+ return LargeChunk::getHeader(Ptr)->Size;
}
- private:
- ReservedAddressRange *getReservedAddressRange(uptr Ptr) {
- return reinterpret_cast<ReservedAddressRange*>(
- Ptr - sizeof(ReservedAddressRange));
- }
- ReservedAddressRange *getReservedAddressRange(const void *Ptr) {
- return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
+ "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
+ FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
+ (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
}
- static constexpr uptr AlignedReservedAddressRangeSize =
- (sizeof(ReservedAddressRange) + MinAlignment - 1) & ~(MinAlignment - 1);
+ private:
static constexpr uptr HeadersSize =
- AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;
+ LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
- uptr PageSizeCached;
- SpinMutex StatsMutex;
+ StaticSpinMutex StatsMutex;
+ u32 NumberOfAllocs;
+ u32 NumberOfFrees;
+ uptr AllocatedBytes;
+ uptr FreedBytes;
+ uptr LargestSize;
};
#endif // SCUDO_ALLOCATOR_SECONDARY_H_
diff --git a/lib/scudo/scudo_errors.cpp b/lib/scudo/scudo_errors.cpp
new file mode 100644
index 000000000000..d11e03cf9163
--- /dev/null
+++ b/lib/scudo/scudo_errors.cpp
@@ -0,0 +1,77 @@
+//===-- scudo_errors.cpp ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Verbose termination functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+ dieWithMessage("calloc parameters overflow: count * size (%zd * %zd) cannot "
+ "be represented with type size_t\n", Count, Size);
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+ dieWithMessage("pvalloc parameters overflow: size 0x%zx rounded up to system "
+ "page size 0x%zx cannot be represented in type size_t\n", Size,
+ GetPageSizeCached());
+}
+
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+ uptr MaxAlignment) {
+ dieWithMessage("invalid allocation alignment: %zd exceeds maximum supported "
+ "allocation of %zd\n", Alignment, MaxAlignment);
+}
+
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment) {
+ dieWithMessage("invalid allocation alignment: %zd, alignment must be a power "
+ "of two\n", Alignment);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+ dieWithMessage("invalid alignment requested in posix_memalign: %zd, alignment"
+ " must be a power of two and a multiple of sizeof(void *) == %zd\n",
+ Alignment, sizeof(void *)); // NOLINT
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment) {
+#if SANITIZER_POSIX
+ dieWithMessage("invalid alignment requested in aligned_alloc: %zd, alignment "
+ "must be a power of two and the requested size 0x%zx must be a multiple "
+ "of alignment\n", Alignment, Size);
+#else
+ dieWithMessage("invalid alignment requested in aligned_alloc: %zd, the "
+ "requested size 0x%zx must be a multiple of alignment\n", Alignment,
+ Size);
+#endif
+}
+
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize) {
+ dieWithMessage("requested allocation size 0x%zx (0x%zx after adjustments) "
+ "exceeds maximum supported size of 0x%zx\n", UserSize, TotalSize,
+ MaxSize);
+}
+
+void NORETURN reportRssLimitExceeded() {
+ dieWithMessage("specified RSS limit exceeded, currently set to "
+ "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+ dieWithMessage("allocator is out of memory trying to allocate 0x%zx bytes\n",
+ RequestedSize);
+}
+
+} // namespace __scudo
diff --git a/lib/scudo/scudo_errors.h b/lib/scudo/scudo_errors.h
new file mode 100644
index 000000000000..8b1af996be04
--- /dev/null
+++ b/lib/scudo/scudo_errors.h
@@ -0,0 +1,35 @@
+//===-- scudo_errors.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_errors.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ERRORS_H_
+#define SCUDO_ERRORS_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+ uptr MaxAlignment);
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize);
+void NORETURN reportRssLimitExceeded();
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+
+} // namespace __scudo
+
+#endif // SCUDO_ERRORS_H_
diff --git a/lib/scudo/scudo_flags.cpp b/lib/scudo/scudo_flags.cpp
index 2aff3ef1e8fa..c012471a8368 100644
--- a/lib/scudo/scudo_flags.cpp
+++ b/lib/scudo/scudo_flags.cpp
@@ -12,13 +12,12 @@
//===----------------------------------------------------------------------===//
#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
#include "scudo_utils.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
-SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void);
-
namespace __scudo {
static Flags ScudoFlags; // Use via getFlags().
@@ -36,6 +35,14 @@ static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
#undef SCUDO_FLAG
}
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+ return SANITIZER_STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
static const char *getScudoDefaultOptions() {
return (&__scudo_default_options) ? __scudo_default_options() : "";
}
@@ -55,6 +62,9 @@ void initFlags() {
RegisterScudoFlags(&ScudoParser, f);
RegisterCommonFlags(&ScudoParser);
+ // Override from compile definition.
+ ScudoParser.ParseString(getCompileDefinitionScudoDefaultOptions());
+
// Override from user-specified string.
ScudoParser.ParseString(getScudoDefaultOptions());
@@ -119,3 +129,9 @@ Flags *getFlags() {
}
} // namespace __scudo
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void) {
+ return "";
+}
+#endif
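
This is the standard sanitizer weak-override pattern: the runtime supplies a weak __scudo_default_options returning "", and a strong definition in the user's binary replaces it at link time (on platforms without weak hooks, the fallback above is compiled in directly). Condensed, for ELF toolchains the SANITIZER_* macros expand to roughly:

```cpp
// Runtime side: a weak default that user code may override.
extern "C" __attribute__((weak)) const char *__scudo_default_options() {
  return "";
}

// User side: a strong definition wins at link time, e.g.
//   extern "C" const char *__scudo_default_options() {
//     return "QuarantineSizeKb=256:ZeroContents=1";
//   }
```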
diff --git a/lib/scudo/scudo_interceptors.cpp b/lib/scudo/scudo_interceptors.cpp
deleted file mode 100644
index 735a13196757..000000000000
--- a/lib/scudo/scudo_interceptors.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-//===-- scudo_interceptors.cpp ----------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// Linux specific malloc interception functions.
-///
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
-
-#include "scudo_allocator.h"
-
-#include "interception/interception.h"
-
-using namespace __scudo;
-
-INTERCEPTOR(void, free, void *ptr) {
- scudoFree(ptr, FromMalloc);
-}
-
-INTERCEPTOR(void, cfree, void *ptr) {
- scudoFree(ptr, FromMalloc);
-}
-
-INTERCEPTOR(void*, malloc, uptr size) {
- return scudoMalloc(size, FromMalloc);
-}
-
-INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
- return scudoRealloc(ptr, size);
-}
-
-INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
- return scudoCalloc(nmemb, size);
-}
-
-INTERCEPTOR(void*, valloc, uptr size) {
- return scudoValloc(size);
-}
-
-INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
- return scudoMemalign(alignment, size);
-}
-
-INTERCEPTOR(void*, __libc_memalign, uptr alignment, uptr size) {
- return scudoMemalign(alignment, size);
-}
-
-INTERCEPTOR(void*, pvalloc, uptr size) {
- return scudoPvalloc(size);
-}
-
-INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
- return scudoAlignedAlloc(alignment, size);
-}
-
-INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
- return scudoPosixMemalign(memptr, alignment, size);
-}
-
-INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
- return scudoMallocUsableSize(ptr);
-}
-
-INTERCEPTOR(int, mallopt, int cmd, int value) {
- return -1;
-}
-
-#endif // SANITIZER_LINUX
diff --git a/lib/scudo/scudo_interface_internal.h b/lib/scudo/scudo_interface_internal.h
index 3f39e0c4ee0b..3e520a50c83b 100644
--- a/lib/scudo/scudo_interface_internal.h
+++ b/lib/scudo/scudo_interface_internal.h
@@ -14,9 +14,20 @@
#ifndef SCUDO_INTERFACE_INTERNAL_H_
#define SCUDO_INTERFACE_INTERNAL_H_
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+using __sanitizer::s32;
+
extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __scudo_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit);
+
SANITIZER_INTERFACE_ATTRIBUTE
-void __scudo_set_rss_limit(unsigned long LimitMb, int HardLimit); // NOLINT
+void __scudo_print_stats();
} // extern "C"
#endif // SCUDO_INTERFACE_INTERNAL_H_
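
Both entry points are exported, so an application can drive them directly;
the user-facing declarations are expected to come from compiler-rt's
include/sanitizer/scudo_interface.h. A usage sketch, assuming a soft RSS
limit is reported as allocation failure rather than a fatal error:

  void tuneScudoAtStartup() {
    __scudo_set_rss_limit(2048, /*HardLimit=*/0);  // soft 2 GB RSS cap
    __scudo_print_stats();                         // log allocator stats
  }
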
diff --git a/lib/scudo/scudo_malloc.cpp b/lib/scudo/scudo_malloc.cpp
new file mode 100644
index 000000000000..91a77b365823
--- /dev/null
+++ b/lib/scudo/scudo_malloc.cpp
@@ -0,0 +1,85 @@
+//===-- scudo_malloc.cpp ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for malloc-related functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+#include <stddef.h>
+
+using namespace __scudo;
+
+extern "C" {
+INTERCEPTOR_ATTRIBUTE void free(void *ptr) {
+ scudoDeallocate(ptr, 0, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *malloc(size_t size) {
+ return scudoAllocate(size, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *realloc(void *ptr, size_t size) {
+ return scudoRealloc(ptr, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *calloc(size_t nmemb, size_t size) {
+ return scudoCalloc(nmemb, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *valloc(size_t size) {
+ return scudoValloc(size);
+}
+
+INTERCEPTOR_ATTRIBUTE
+int posix_memalign(void **memptr, size_t alignment, size_t size) {
+ return scudoPosixMemalign(memptr, alignment, size);
+}
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR_ATTRIBUTE void cfree(void *ptr) ALIAS("free");
+#endif
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR_ATTRIBUTE void *memalign(size_t alignment, size_t size) {
+ return scudoAllocate(size, alignment, FromMemalign);
+}
+
+INTERCEPTOR_ATTRIBUTE
+void *__libc_memalign(size_t alignment, size_t size) ALIAS("memalign");
+#endif
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR_ATTRIBUTE void *pvalloc(size_t size) {
+ return scudoPvalloc(size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR_ATTRIBUTE void *aligned_alloc(size_t alignment, size_t size) {
+ return scudoAlignedAlloc(alignment, size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR_ATTRIBUTE size_t malloc_usable_size(void *ptr) {
+ return scudoMallocUsableSize(ptr);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+INTERCEPTOR_ATTRIBUTE int mallopt(int cmd, int value) {
+ return -1;
+}
+#endif
+} // extern "C"
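
With the Linux-only interceptors gone, these are plain exported
definitions, and cfree / __libc_memalign reuse free / memalign through
ALIAS rather than a call thunk. An illustration of what ALIAS boils down
to on ELF targets (sanitizer_internal_defs.h defines it as the GCC alias
attribute; the names below are hypothetical stand-ins):

  extern "C" void demoFree(void *ptr) {
    scudoDeallocate(ptr, 0, 0, FromMalloc);
  }
  // Same symbol body, second exported name: no extra call frame.
  extern "C" void demoCfree(void *ptr) __attribute__((alias("demoFree")));
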
diff --git a/lib/scudo/scudo_new_delete.cpp b/lib/scudo/scudo_new_delete.cpp
index c5a1abbed82b..daa3b47dc727 100644
--- a/lib/scudo/scudo_new_delete.cpp
+++ b/lib/scudo/scudo_new_delete.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "scudo_allocator.h"
+#include "scudo_errors.h"
#include "interception/interception.h"
@@ -24,51 +25,84 @@ using namespace __scudo;
// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
+enum class align_val_t: size_t {};
} // namespace std
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY_ALIGN(Type, Align, NoThrow) \
+ void *Ptr = scudoAllocate(size, static_cast<uptr>(Align), Type); \
+ if (!NoThrow && UNLIKELY(!Ptr)) reportOutOfMemory(size); \
+ return Ptr;
+#define OPERATOR_NEW_BODY(Type, NoThrow) \
+ OPERATOR_NEW_BODY_ALIGN(Type, 0, NoThrow)
+
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/true); }
CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size) {
- void *res = scudoMalloc(size, FromNew);
- if (UNLIKELY(!res)) DieOnFailure::OnOOM();
- return res;
-}
-CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size) {
- void *res = scudoMalloc(size, FromNewArray);
- if (UNLIKELY(!res)) DieOnFailure::OnOOM();
- return res;
-}
-CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size, std::nothrow_t const&) {
- return scudoMalloc(size, FromNew);
-}
-CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size, std::nothrow_t const&) {
- return scudoMalloc(size, FromNewArray);
-}
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/true); }
+#define OPERATOR_DELETE_BODY(Type) \
+ scudoDeallocate(ptr, 0, 0, Type);
+#define OPERATOR_DELETE_BODY_SIZE(Type) \
+ scudoDeallocate(ptr, size, 0, Type);
+#define OPERATOR_DELETE_BODY_ALIGN(Type) \
+ scudoDeallocate(ptr, 0, static_cast<uptr>(align), Type);
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN(Type) \
+ scudoDeallocate(ptr, size, static_cast<uptr>(align), Type);
+
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNewArray); }
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr) NOEXCEPT {
- return scudoFree(ptr, FromNew);
-}
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr) NOEXCEPT {
- return scudoFree(ptr, FromNewArray);
-}
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&) NOEXCEPT {
- return scudoFree(ptr, FromNew);
-}
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&) NOEXCEPT {
- return scudoFree(ptr, FromNewArray);
-}
+void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, size_t size) NOEXCEPT {
- scudoSizedFree(ptr, size, FromNew);
-}
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNew); }
CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, size_t size) NOEXCEPT {
- scudoSizedFree(ptr, size, FromNewArray);
-}
+void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNewArray); }
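
The added overloads implement C++17 aligned allocation, so over-aligned
types no longer fall through to the scalar operators. A sketch of the
overload selection, assuming -std=c++17 with aligned new enabled (the
size argument appears only when sized deallocation is on):

  struct alignas(64) CacheLine { char Bytes[64]; };

  void roundTrip() {
    CacheLine *P = new CacheLine;  // operator new(64, std::align_val_t{64})
    delete P;                      // operator delete(P, 64, std::align_val_t{64})
  }
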
diff --git a/lib/scudo/scudo_platform.h b/lib/scudo/scudo_platform.h
index e1c9c32e9a62..3a6f4be69dbd 100644
--- a/lib/scudo/scudo_platform.h
+++ b/lib/scudo/scudo_platform.h
@@ -43,7 +43,11 @@
// Maximum number of TSDs that can be created for the Shared model.
#ifndef SCUDO_SHARED_TSD_POOL_SIZE
-# define SCUDO_SHARED_TSD_POOL_SIZE 32U
+# if SANITIZER_ANDROID
+# define SCUDO_SHARED_TSD_POOL_SIZE 2U
+# else
+# define SCUDO_SHARED_TSD_POOL_SIZE 32U
+# endif // SANITIZER_ANDROID
#endif // SCUDO_SHARED_TSD_POOL_SIZE
// The following allows the public interface functions to be disabled.
@@ -51,6 +55,16 @@
# define SCUDO_CAN_USE_PUBLIC_INTERFACE 1
#endif
+// Hooks in the allocation & deallocation paths can become a security concern if
+// implemented improperly, or if overwritten by an attacker. Use with caution.
+#ifndef SCUDO_CAN_USE_HOOKS
+# if SANITIZER_FUCHSIA
+# define SCUDO_CAN_USE_HOOKS 1
+# else
+# define SCUDO_CAN_USE_HOOKS 0
+# endif // SANITIZER_FUCHSIA
+#endif // SCUDO_CAN_USE_HOOKS
+
namespace __scudo {
#if SANITIZER_CAN_USE_ALLOCATOR64
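
Both knobs keep the header's #ifndef pattern, so a platform port can set
them from its compile line without patching the source; a hypothetical
build invocation might pass:

  // -DSCUDO_SHARED_TSD_POOL_SIZE=8U -DSCUDO_CAN_USE_HOOKS=1
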
diff --git a/lib/scudo/scudo_termination.cpp b/lib/scudo/scudo_termination.cpp
index c441ff3c126a..4237d3bc1865 100644
--- a/lib/scudo/scudo_termination.cpp
+++ b/lib/scudo/scudo_termination.cpp
@@ -35,7 +35,7 @@ void SetCheckFailedCallback(CheckFailedCallbackType callback) {}
void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
u64 Value1, u64 Value2) {
- __scudo::dieWithMessage("Scudo CHECK failed: %s:%d %s (%lld, %lld)\n",
+ __scudo::dieWithMessage("CHECK failed at %s:%d %s (%lld, %lld)\n",
File, Line, Condition, Value1, Value2);
}
diff --git a/lib/scudo/scudo_tsd.h b/lib/scudo/scudo_tsd.h
index 80464b5ea1e4..2bd78716af69 100644
--- a/lib/scudo/scudo_tsd.h
+++ b/lib/scudo/scudo_tsd.h
@@ -23,11 +23,11 @@
namespace __scudo {
-struct ALIGNED(64) ScudoTSD {
- AllocatorCache Cache;
+struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
+ AllocatorCacheT Cache;
uptr QuarantineCachePlaceHolder[4];
- void init(bool Shared);
+ void init();
void commitBack();
INLINE bool tryLock() {
@@ -36,29 +36,23 @@ struct ALIGNED(64) ScudoTSD {
return true;
}
if (atomic_load_relaxed(&Precedence) == 0)
- atomic_store_relaxed(&Precedence, MonotonicNanoTime());
+ atomic_store_relaxed(&Precedence, static_cast<uptr>(
+ MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0)));
return false;
}
INLINE void lock() {
- Mutex.Lock();
atomic_store_relaxed(&Precedence, 0);
+ Mutex.Lock();
}
- INLINE void unlock() {
- if (!UnlockRequired)
- return;
- Mutex.Unlock();
- }
+ INLINE void unlock() { Mutex.Unlock(); }
- INLINE u64 getPrecedence() {
- return atomic_load_relaxed(&Precedence);
- }
+ INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
private:
- bool UnlockRequired;
StaticSpinMutex Mutex;
- atomic_uint64_t Precedence;
+ atomic_uintptr_t Precedence;
};
void initThread(bool MinimalInit);
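
Precedence turns contention into an age: the first thread to lose a
tryLock stamps the TSD with a truncated monotonic time (shifted right by
16 bits on 32-bit targets so it fits a uptr), lock() clears the stamp, and
the shared-model slow path can then prefer the TSD with the smallest
non-zero stamp, i.e. the one contended longest. A distillation of the
idiom under those assumptions ('AgedLock' is a hypothetical name; the
success path of tryLock, clipped from the hunk above, is assumed to clear
the stamp just as lock() does):

  struct AgedLock {
    StaticSpinMutex Mutex;
    atomic_uintptr_t Age;  // 0 = uncontended, else truncated timestamp

    bool tryLock() {
      if (Mutex.TryLock()) {
        atomic_store_relaxed(&Age, 0);
        return true;
      }
      if (atomic_load_relaxed(&Age) == 0)  // only the first loser stamps
        atomic_store_relaxed(&Age, static_cast<uptr>(
            MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0)));
      return false;
    }
    void lock() { atomic_store_relaxed(&Age, 0); Mutex.Lock(); }
  };
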
diff --git a/lib/scudo/scudo_tsd_exclusive.cpp b/lib/scudo/scudo_tsd_exclusive.cpp
index 1084dfac91e1..74e797580be7 100644
--- a/lib/scudo/scudo_tsd_exclusive.cpp
+++ b/lib/scudo/scudo_tsd_exclusive.cpp
@@ -50,7 +50,7 @@ static void teardownThread(void *Ptr) {
static void initOnce() {
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
initScudo();
- FallbackTSD.init(/*Shared=*/true);
+ FallbackTSD.init();
}
void initThread(bool MinimalInit) {
@@ -59,7 +59,7 @@ void initThread(bool MinimalInit) {
return;
CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
GetPthreadDestructorIterations())), 0);
- TSD.init(/*Shared=*/false);
+ TSD.init();
ScudoThreadState = ThreadInitialized;
}
diff --git a/lib/scudo/scudo_tsd_exclusive.inc b/lib/scudo/scudo_tsd_exclusive.inc
index 567b6a1edd12..1fa9dcdfd20d 100644
--- a/lib/scudo/scudo_tsd_exclusive.inc
+++ b/lib/scudo/scudo_tsd_exclusive.inc
@@ -35,11 +35,13 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
initThread(MinimalInit);
}
-ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
FallbackTSD.lock();
+ *UnlockRequired = true;
return &FallbackTSD;
}
+ *UnlockRequired = false;
return &TSD;
}
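
getTSDAndLock now tells the caller, through the out-parameter, whether it
handed out the locked fallback TSD or the thread's own exclusive one. The
real call sites live in scudo_allocator.cpp; the pattern they follow looks
like this:

  bool UnlockRequired;
  ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
  // ... allocate or deallocate through TSD->Cache ...
  if (UnlockRequired)
    TSD->unlock();
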
diff --git a/lib/scudo/scudo_tsd_shared.cpp b/lib/scudo/scudo_tsd_shared.cpp
index 3e13e5d3a109..8853894c00a2 100644
--- a/lib/scudo/scudo_tsd_shared.cpp
+++ b/lib/scudo/scudo_tsd_shared.cpp
@@ -23,6 +23,13 @@ pthread_key_t PThreadKey;
static atomic_uint32_t CurrentIndex;
static ScudoTSD *TSDs;
static u32 NumberOfTSDs;
+static u32 CoPrimes[SCUDO_SHARED_TSD_POOL_SIZE];
+static u32 NumberOfCoPrimes = 0;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
static void initOnce() {
CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
@@ -31,13 +38,21 @@ static void initOnce() {
static_cast<u32>(SCUDO_SHARED_TSD_POOL_SIZE));
TSDs = reinterpret_cast<ScudoTSD *>(
MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
- for (u32 i = 0; i < NumberOfTSDs; i++)
- TSDs[i].init(/*Shared=*/true);
+ for (u32 I = 0; I < NumberOfTSDs; I++) {
+ TSDs[I].init();
+ u32 A = I + 1;
+ u32 B = NumberOfTSDs;
+ while (B != 0) { const u32 T = A; A = B; B = T % B; }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
}
ALWAYS_INLINE void setCurrentTSD(ScudoTSD *TSD) {
#if SANITIZER_ANDROID
*get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+#elif SANITIZER_LINUX
+ CurrentTSD = TSD;
#else
CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(TSD)), 0);
#endif // SANITIZER_ANDROID
@@ -50,34 +65,42 @@ void initThread(bool MinimalInit) {
setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
}
-ScudoTSD *getTSDAndLockSlow() {
- ScudoTSD *TSD;
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) {
if (NumberOfTSDs > 1) {
- // Go through all the contexts and find the first unlocked one.
- for (u32 i = 0; i < NumberOfTSDs; i++) {
- TSD = &TSDs[i];
- if (TSD->tryLock()) {
- setCurrentTSD(TSD);
- return TSD;
+ // Use the Precedence of the current TSD as our random seed. Since we are in
+ // the slow path, it means that tryLock failed, and as a result it's very
+ // likely that said Precedence is non-zero.
+ u32 RandState = static_cast<u32>(TSD->getPrecedence());
+ const u32 R = Rand(&RandState);
+ const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+ u32 Index = R % NumberOfTSDs;
+ uptr LowestPrecedence = UINTPTR_MAX;
+ ScudoTSD *CandidateTSD = nullptr;
+ // Go randomly through at most 4 contexts and find a candidate.
+ for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+ if (TSDs[Index].tryLock()) {
+ setCurrentTSD(&TSDs[Index]);
+ return &TSDs[Index];
}
- }
- // No luck, find the one with the lowest Precedence, and slow lock it.
- u64 LowestPrecedence = UINT64_MAX;
- for (u32 i = 0; i < NumberOfTSDs; i++) {
- u64 Precedence = TSDs[i].getPrecedence();
- if (Precedence && Precedence < LowestPrecedence) {
- TSD = &TSDs[i];
+ const uptr Precedence = TSDs[Index].getPrecedence();
+ // A 0 precedence here means another thread just locked this TSD.
+ if (UNLIKELY(Precedence == 0))
+ continue;
+ if (Precedence < LowestPrecedence) {
+ CandidateTSD = &TSDs[Index];
LowestPrecedence = Precedence;
}
+ Index += Inc;
+ if (Index >= NumberOfTSDs)
+ Index -= NumberOfTSDs;
}
- if (LIKELY(LowestPrecedence != UINT64_MAX)) {
- TSD->lock();
- setCurrentTSD(TSD);
- return TSD;
+ if (CandidateTSD) {
+ CandidateTSD->lock();
+ setCurrentTSD(CandidateTSD);
+ return CandidateTSD;
}
}
// Last resort, stick with the current one.
- TSD = getCurrentTSD();
TSD->lock();
return TSD;
}
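
The probing works because initOnce() kept only strides coprime with the
pool size: stepping by such a stride from any start walks all of
Z/NumberOfTSDs without revisiting an index, so the four probes always hit
four distinct TSDs. Restating the GCD kernel with a worked example:

  static u32 gcd(u32 A, u32 B) {  // same loop as in initOnce()
    while (B != 0) { const u32 T = A; A = B; B = T % B; }
    return A;
  }
  // With NumberOfTSDs == 8 the strides kept are {1, 3, 5, 7}. A random
  // start of 2 with stride 3 probes indices 2, 5, 0, 3: four distinct
  // TSDs, with the conditional subtraction in the loop above standing in
  // for a modulo on the hot path.
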
diff --git a/lib/scudo/scudo_tsd_shared.inc b/lib/scudo/scudo_tsd_shared.inc
index 79fcd651ed2d..9dad756b5386 100644
--- a/lib/scudo/scudo_tsd_shared.inc
+++ b/lib/scudo/scudo_tsd_shared.inc
@@ -19,9 +19,16 @@
extern pthread_key_t PThreadKey;
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
+
ALWAYS_INLINE ScudoTSD* getCurrentTSD() {
#if SANITIZER_ANDROID
return reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+#elif SANITIZER_LINUX
+ return CurrentTSD;
#else
return reinterpret_cast<ScudoTSD *>(pthread_getspecific(PThreadKey));
#endif // SANITIZER_ANDROID
@@ -33,16 +40,17 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
initThread(MinimalInit);
}
-ScudoTSD *getTSDAndLockSlow();
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD);
-ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
ScudoTSD *TSD = getCurrentTSD();
- CHECK(TSD && "No TSD associated with the current thread!");
+ DCHECK(TSD && "No TSD associated with the current thread!");
+ *UnlockRequired = true;
// Try to lock the currently associated context.
if (TSD->tryLock())
return TSD;
// If it failed, go the slow path.
- return getTSDAndLockSlow();
+ return getTSDAndLockSlow(TSD);
}
#endif // !SCUDO_TSD_EXCLUSIVE
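
On non-Android Linux the current TSD now sits in initial-exec TLS: the
read above becomes a fixed offset from the thread pointer instead of a
__tls_get_addr call, at the cost of reserving static TLS space (which can
make dlopen() of a module built this way fail once that reserve is
exhausted). A standalone illustration of the attribute:

  __attribute__((tls_model("initial-exec"))) static THREADLOCAL int Counter;
  int bumpCounter() { return ++Counter; }  // one TLS-relative load/store
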
diff --git a/lib/scudo/scudo_utils.cpp b/lib/scudo/scudo_utils.cpp
index 2f936bf9e780..d5788d20ca46 100644
--- a/lib/scudo/scudo_utils.cpp
+++ b/lib/scudo/scudo_utils.cpp
@@ -17,7 +17,10 @@
# include <cpuid.h>
#elif defined(__arm__) || defined(__aarch64__)
# include "sanitizer_common/sanitizer_getauxval.h"
-# if SANITIZER_POSIX
+# if SANITIZER_FUCHSIA
+# include <zircon/syscalls.h>
+# include <zircon/features.h>
+# elif SANITIZER_POSIX
# include "sanitizer_common/sanitizer_posix.h"
# include <fcntl.h>
# endif
@@ -38,12 +41,18 @@ extern int VSNPrintf(char *buff, int buff_length, const char *format,
namespace __scudo {
FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) {
+ static const char ScudoError[] = "Scudo ERROR: ";
+ static constexpr uptr PrefixSize = sizeof(ScudoError) - 1;
  // Our messages are tiny; 256 characters is more than enough.
char Message[256];
va_list Args;
va_start(Args, Format);
- VSNPrintf(Message, sizeof(Message), Format, Args);
+ internal_memcpy(Message, ScudoError, PrefixSize);
+ VSNPrintf(Message + PrefixSize, sizeof(Message) - PrefixSize, Format, Args);
va_end(Args);
+ LogMessageOnPrintf(Message);
+ if (common_flags()->abort_on_error)
+ SetAbortMessage(Message);
RawWrite(Message);
Die();
}
@@ -107,9 +116,17 @@ INLINE bool areBionicGlobalsInitialized() {
}
bool hasHardwareCRC32() {
+#if SANITIZER_FUCHSIA
+ u32 HWCap;
+ zx_status_t Status = zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+ if (Status != ZX_OK || (HWCap & ZX_ARM64_FEATURE_ISA_CRC32) == 0)
+ return false;
+ return true;
+#else
if (&getauxval && areBionicGlobalsInitialized())
return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
return hasHardwareCRC32ARMPosix();
+#endif // SANITIZER_FUCHSIA
}
#else
bool hasHardwareCRC32() { return false; }
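
hasHardwareCRC32() asks the platform rather than the compiler: Fuchsia
exposes the feature bit through zx_system_get_features, Linux and Android
through the auxiliary vector. A standalone sketch of the Linux/AArch64
branch, with header names assumed for glibc:

  #include <sys/auxv.h>   // getauxval
  #include <asm/hwcap.h>  // HWCAP_CRC32

  bool cpuHasCRC32() {
    return (getauxval(AT_HWCAP) & HWCAP_CRC32) != 0;
  }
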