author     Dimitry Andric <dim@FreeBSD.org>  2019-08-20 20:51:06 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2019-08-20 20:51:06 +0000
commit     8f3cadc28cb2bb9e8f9d69eeaaea1f57f2f7b2ab (patch)
tree       05a2b6ec297fe6283d9557c791445d1daf88dcd0 /lib/scudo/standalone
parent     63714eb5809e39666dec2454c354195e76f916ba (diff)
Vendor import of stripped compiler-rt trunk r366426 (just before the release_90 branching point)
tag: vendor/compiler-rt/compiler-rt-trunk-r366426
Notes:
svn path=/vendor/compiler-rt/dist/; revision=351282
svn path=/vendor/compiler-rt/compiler-rt-trunk-r366426/; revision=351283; tag=vendor/compiler-rt/compiler-rt-trunk-r366426
Diffstat (limited to 'lib/scudo/standalone')
-rw-r--r--  lib/scudo/standalone/allocator_config.h    80
-rw-r--r--  lib/scudo/standalone/atomic_helpers.h      139
-rw-r--r--  lib/scudo/standalone/bytemap.h             111
-rw-r--r--  lib/scudo/standalone/checksum.cc           70
-rw-r--r--  lib/scudo/standalone/checksum.h            54
-rw-r--r--  lib/scudo/standalone/chunk.h               156
-rw-r--r--  lib/scudo/standalone/combined.h            557
-rw-r--r--  lib/scudo/standalone/common.cc             32
-rw-r--r--  lib/scudo/standalone/common.h              176
-rw-r--r--  lib/scudo/standalone/crc32_hw.cc           19
-rw-r--r--  lib/scudo/standalone/flags.cc              57
-rw-r--r--  lib/scudo/standalone/flags.h               30
-rw-r--r--  lib/scudo/standalone/flags.inc             50
-rw-r--r--  lib/scudo/standalone/flags_parser.cc       164
-rw-r--r--  lib/scudo/standalone/flags_parser.h        55
-rw-r--r--  lib/scudo/standalone/fuchsia.cc            189
-rw-r--r--  lib/scudo/standalone/fuchsia.h             31
-rw-r--r--  lib/scudo/standalone/interface.h           29
-rw-r--r--  lib/scudo/standalone/internal_defs.h       135
-rw-r--r--  lib/scudo/standalone/linux.cc              171
-rw-r--r--  lib/scudo/standalone/linux.h               70
-rw-r--r--  lib/scudo/standalone/list.h                156
-rw-r--r--  lib/scudo/standalone/local_cache.h         181
-rw-r--r--  lib/scudo/standalone/mutex.h               73
-rw-r--r--  lib/scudo/standalone/platform.h            70
-rw-r--r--  lib/scudo/standalone/primary32.h           401
-rw-r--r--  lib/scudo/standalone/primary64.h           381
-rw-r--r--  lib/scudo/standalone/quarantine.h          289
-rw-r--r--  lib/scudo/standalone/release.h             262
-rw-r--r--  lib/scudo/standalone/report.cc             192
-rw-r--r--  lib/scudo/standalone/report.h              57
-rw-r--r--  lib/scudo/standalone/secondary.cc          136
-rw-r--r--  lib/scudo/standalone/secondary.h           97
-rw-r--r--  lib/scudo/standalone/size_class_map.h      149
-rw-r--r--  lib/scudo/standalone/stats.h               105
-rw-r--r--  lib/scudo/standalone/string_utils.cc       236
-rw-r--r--  lib/scudo/standalone/string_utils.h        42
-rw-r--r--  lib/scudo/standalone/tsd.h                 66
-rw-r--r--  lib/scudo/standalone/tsd_exclusive.h       118
-rw-r--r--  lib/scudo/standalone/tsd_shared.h          169
-rw-r--r--  lib/scudo/standalone/vector.h              118
-rw-r--r--  lib/scudo/standalone/wrappers_c.cc         39
-rw-r--r--  lib/scudo/standalone/wrappers_c.h          52
-rw-r--r--  lib/scudo/standalone/wrappers_c.inc        176
-rw-r--r--  lib/scudo/standalone/wrappers_c_bionic.cc  49
-rw-r--r--  lib/scudo/standalone/wrappers_c_checks.h   67
-rw-r--r--  lib/scudo/standalone/wrappers_cpp.cc       107
47 files changed, 6163 insertions, 0 deletions
diff --git a/lib/scudo/standalone/allocator_config.h b/lib/scudo/standalone/allocator_config.h
new file mode 100644
index 000000000000..06ec4f3f795a
--- /dev/null
+++ b/lib/scudo/standalone/allocator_config.h
@@ -0,0 +1,80 @@
+//===-- allocator_config.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_H_
+#define SCUDO_ALLOCATOR_CONFIG_H_
+
+#include "combined.h"
+#include "common.h"
+#include "flags.h"
+#include "primary32.h"
+#include "primary64.h"
+#include "size_class_map.h"
+#include "tsd_exclusive.h"
+#include "tsd_shared.h"
+
+namespace scudo {
+
+// Default configurations for various platforms.
+
+struct DefaultConfig {
+ using SizeClassMap = DefaultSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 1GB Regions
+ typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+ // 512KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+ template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+};
+
+struct AndroidConfig {
+ using SizeClassMap = AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 1GB regions
+ typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+ // 512KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
+};
+
+struct AndroidSvelteConfig {
+ using SizeClassMap = SvelteSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 512MB regions
+ typedef SizeClassAllocator64<SizeClassMap, 29U> Primary;
+#else
+ // 256KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+#endif
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
+};
+
+struct FuchsiaConfig {
+ // 1GB Regions
+ typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
+};
+
+#if SCUDO_ANDROID
+typedef AndroidConfig Config;
+#elif SCUDO_FUCHSIA
+typedef FuchsiaConfig Config;
+#else
+typedef DefaultConfig Config;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_H_
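
[Editor's note] Each configuration above supplies the same three members: a SizeClassMap, a Primary allocator parameterized by the log2 of its region size (e.g. 30U for 1GB regions), and a TSD registry, exclusive or shared. A new platform tune-up is therefore just another struct with those members. A minimal sketch under that assumption; `TinyConfig` is a hypothetical name, not part of this import:

struct TinyConfig {
  using SizeClassMap = SvelteSizeClassMap;
  // 2^28 = 256MB regions.
  typedef SizeClassAllocator64<SizeClassMap, 28U> Primary;
  // Shared registry with a single TSD, for very constrained targets.
  template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 1U>;
};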
diff --git a/lib/scudo/standalone/atomic_helpers.h b/lib/scudo/standalone/atomic_helpers.h
new file mode 100644
index 000000000000..47037d764e25
--- /dev/null
+++ b/lib/scudo/standalone/atomic_helpers.h
@@ -0,0 +1,139 @@
+//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ATOMIC_H_
+#define SCUDO_ATOMIC_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+enum memory_order {
+ memory_order_relaxed = 0,
+ memory_order_consume = 1,
+ memory_order_acquire = 2,
+ memory_order_release = 3,
+ memory_order_acq_rel = 4,
+ memory_order_seq_cst = 5
+};
+COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
+COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
+COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
+COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
+COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
+COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+
+struct atomic_u8 {
+ typedef u8 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u16 {
+ typedef u16 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_s32 {
+ typedef s32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u32 {
+ typedef u32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u64 {
+ typedef u64 Type;
+ // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ ALIGNED(8) volatile Type ValDoNotUse;
+};
+
+struct atomic_uptr {
+ typedef uptr Type;
+ volatile Type ValDoNotUse;
+};
+
+template <typename T>
+INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type V;
+ __atomic_load(&A->ValDoNotUse, &V, MO);
+ return V;
+}
+
+template <typename T>
+INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ __atomic_store(&A->ValDoNotUse, &V, MO);
+}
+
+INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type R;
+ __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+ return R;
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+ __ATOMIC_RELAXED);
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
+ __ATOMIC_RELAXED);
+}
+
+// Clutter-reducing helpers.
+
+template <typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+ return atomic_load(A, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+ atomic_store(A, V, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+ typename T::Type Cmp,
+ typename T::Type Xchg) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+ return Cmp;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_ATOMIC_H_
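
[Editor's note] The helpers above are thin wrappers over the __atomic builtins, with the memory_order enum pinned to the builtin constants by the COMPILER_CHECKs. A short sketch of how they compose; the helper names below are hypothetical, not from the source:

scudo::atomic_u32 Refs = {};

void acquireRef() {
  scudo::atomic_fetch_add(&Refs, 1U, scudo::memory_order_relaxed);
}

bool tryClaim(scudo::atomic_u32 *Flag) {
  scudo::u32 Expected = 0;
  // The weak variant may fail spuriously, hence the loop.
  while (!scudo::atomic_compare_exchange_weak(Flag, &Expected, 1U,
                                              scudo::memory_order_acquire)) {
    if (Expected != 0)
      return false; // Already claimed by another thread.
  }
  return true;
}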
diff --git a/lib/scudo/standalone/bytemap.h b/lib/scudo/standalone/bytemap.h
new file mode 100644
index 000000000000..caeeb2fac879
--- /dev/null
+++ b/lib/scudo/standalone/bytemap.h
@@ -0,0 +1,111 @@
+//===-- bytemap.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_BYTEMAP_H_
+#define SCUDO_BYTEMAP_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+namespace scudo {
+
+template <uptr Size> class FlatByteMap {
+public:
+ void initLinkerInitialized() {
+ Map = reinterpret_cast<u8 *>(map(nullptr, Size, "scudo:bytemap"));
+ }
+ void init() { initLinkerInitialized(); }
+
+ void unmapTestOnly() { unmap(reinterpret_cast<void *>(Map), Size); }
+
+ void set(uptr Index, u8 Value) {
+ DCHECK_LT(Index, Size);
+ DCHECK_EQ(0U, Map[Index]);
+ Map[Index] = Value;
+ }
+ u8 operator[](uptr Index) {
+ DCHECK_LT(Index, Size);
+ return Map[Index];
+ }
+
+private:
+ u8 *Map;
+};
+
+template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap {
+public:
+ void initLinkerInitialized() {
+ Level1Map = reinterpret_cast<atomic_uptr *>(
+ map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
+ }
+ void init() {
+ Mutex.init();
+ initLinkerInitialized();
+ }
+
+ void reset() {
+ for (uptr I = 0; I < Level1Size; I++) {
+ u8 *P = get(I);
+ if (!P)
+ continue;
+ unmap(P, Level2Size);
+ }
+ memset(Level1Map, 0, sizeof(atomic_uptr) * Level1Size);
+ }
+
+ void unmapTestOnly() {
+ reset();
+ unmap(reinterpret_cast<void *>(Level1Map),
+ sizeof(atomic_uptr) * Level1Size);
+ }
+
+ uptr size() const { return Level1Size * Level2Size; }
+
+ void set(uptr Index, u8 Value) {
+ DCHECK_LT(Index, Level1Size * Level2Size);
+ u8 *Level2Map = getOrCreate(Index / Level2Size);
+ DCHECK_EQ(0U, Level2Map[Index % Level2Size]);
+ Level2Map[Index % Level2Size] = Value;
+ }
+
+ u8 operator[](uptr Index) const {
+ DCHECK_LT(Index, Level1Size * Level2Size);
+ u8 *Level2Map = get(Index / Level2Size);
+ if (!Level2Map)
+ return 0;
+ return Level2Map[Index % Level2Size];
+ }
+
+private:
+ u8 *get(uptr Index) const {
+ DCHECK_LT(Index, Level1Size);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&Level1Map[Index], memory_order_acquire));
+ }
+
+ u8 *getOrCreate(uptr Index) {
+ u8 *Res = get(Index);
+ if (!Res) {
+ ScopedLock L(Mutex);
+ if (!(Res = get(Index))) {
+ Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
+ atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
+ memory_order_release);
+ }
+ }
+ return Res;
+ }
+
+ atomic_uptr *Level1Map;
+ HybridMutex Mutex;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_BYTEMAP_H_
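
[Editor's note] TwoLevelByteMap commits its level-2 arrays lazily: getOrCreate() double-checks under the HybridMutex and publishes the new array with a release store, matching the acquire load in get(). A usage sketch with hypothetical values; note that set() expects each index to be written at most once, per its DCHECK:

// 256 * 256 = 65536 one-byte entries, level-2 arrays mapped on demand.
scudo::TwoLevelByteMap<256U, 256U> Map;

void example() {
  Map.init();          // Initializes the mutex and maps the level-1 array.
  Map.set(12345U, 42); // Commits the enclosing level-2 array if needed.
  scudo::u8 V = Map[12345U]; // Reads 42; uncommitted ranges read as 0.
  (void)V;
}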
diff --git a/lib/scudo/standalone/checksum.cc b/lib/scudo/standalone/checksum.cc
new file mode 100644
index 000000000000..0896d5bdccd5
--- /dev/null
+++ b/lib/scudo/standalone/checksum.cc
@@ -0,0 +1,70 @@
+//===-- checksum.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+#include "atomic_helpers.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+#include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+#if SCUDO_FUCHSIA
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+#else
+#include <sys/auxv.h>
+#endif
+#endif
+
+namespace scudo {
+
+Checksum HashAlgorithm = {Checksum::BSD};
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+#ifndef bit_SSE4_2
+#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+#endif
+
+bool hasHardwareCRC32() {
+ u32 Eax, Ebx = 0, Ecx = 0, Edx = 0;
+ __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+ const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+ (Edx == signature_INTEL_edx) &&
+ (Ecx == signature_INTEL_ecx);
+ const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) &&
+ (Ecx == signature_AMD_ecx);
+ if (!IsIntel && !IsAMD)
+ return false;
+ __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+ return !!(Ecx & bit_SSE4_2);
+}
+
+#elif defined(__arm__) || defined(__aarch64__)
+#ifndef AT_HWCAP
+#define AT_HWCAP 16
+#endif
+#ifndef HWCAP_CRC32
+#define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms.
+#endif
+
+bool hasHardwareCRC32() {
+#if SCUDO_FUCHSIA
+ u32 HWCap;
+ const zx_status_t Status =
+ zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+ if (Status != ZX_OK)
+ return false;
+ return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32);
+#else
+ return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+#endif // SCUDO_FUCHSIA
+}
+#endif // defined(__x86_64__) || defined(__i386__)
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/checksum.h b/lib/scudo/standalone/checksum.h
new file mode 100644
index 000000000000..092342fd6efb
--- /dev/null
+++ b/lib/scudo/standalone/checksum.h
@@ -0,0 +1,54 @@
+//===-- checksum.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKSUM_H_
+#define SCUDO_CHECKSUM_H_
+
+#include "internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -msse4.2
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+
+#ifdef __SSE4_2__
+#include <smmintrin.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+#endif
+#ifdef __ARM_FEATURE_CRC32
+#include <arm_acle.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+#endif
+
+namespace scudo {
+
+enum class Checksum : u8 {
+ BSD = 0,
+ HardwareCRC32 = 1,
+};
+
+// BSD checksum, unlike a software CRC32, doesn't use any array lookup. We save
+// significantly on memory accesses, as well as 1K of CRC32 table, on platforms
+// that do not support hardware CRC32. The checksum itself is 16-bit, which is
+// at odds with CRC32, but enough for our needs.
+INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+ for (u8 I = 0; I < sizeof(Data); I++) {
+ Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
+ Sum = static_cast<u16>(Sum + (Data & 0xff));
+ Data >>= 8;
+ }
+ return Sum;
+}
+
+bool hasHardwareCRC32();
+WEAK u32 computeHardwareCRC32(u32 Crc, uptr Data);
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKSUM_H_
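
[Editor's note] computeHardwareCRC32 is declared WEAK: it only gets a definition when crc32_hw.cc is built with the relevant -msse4.2 or -mcrc flags, so callers test the symbol's address before calling it and fall back to the BSD checksum otherwise. A sketch of that dispatch; the real call site is computeChecksum() in chunk.h, and `checksumOf` is a hypothetical name:

scudo::u16 checksumOf(scudo::u32 Seed, scudo::uptr Data) {
  if (&scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32()) {
    const scudo::u32 Crc = scudo::computeHardwareCRC32(Seed, Data);
    // Fold the 32-bit CRC into the 16-bit checksum field.
    return static_cast<scudo::u16>((Crc & 0xffff) ^ (Crc >> 16));
  }
  return scudo::computeBSDChecksum(static_cast<scudo::u16>(Seed & 0xffff),
                                   Data);
}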
diff --git a/lib/scudo/standalone/chunk.h b/lib/scudo/standalone/chunk.h
new file mode 100644
index 000000000000..76ef661b0dc5
--- /dev/null
+++ b/lib/scudo/standalone/chunk.h
@@ -0,0 +1,156 @@
+//===-- chunk.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHUNK_H_
+#define SCUDO_CHUNK_H_
+
+#include "platform.h"
+
+#include "atomic_helpers.h"
+#include "checksum.h"
+#include "common.h"
+#include "report.h"
+
+namespace scudo {
+
+extern Checksum HashAlgorithm;
+
+INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+ // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+ // as opposed to only for crc32_hw.cc. This means that other hardware specific
+ // instructions were likely emitted at other places, and as a result there is
+ // no reason to not use it here.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
+ return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+#else
+ if (HashAlgorithm == Checksum::HardwareCRC32) {
+ u32 Crc = computeHardwareCRC32(Seed, Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = computeHardwareCRC32(Crc, Array[I]);
+ return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+ } else {
+ u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed & 0xffff), Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Checksum = computeBSDChecksum(Checksum, Array[I]);
+ return Checksum;
+ }
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+}
+
+namespace Chunk {
+
+// Note that in an ideal world, `State` and `Origin` would be `enum class`,
+// with the associated `UnpackedHeader` fields typed accordingly, but
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 prevents it: GCC errors
+// out, complaining that the number of bits is not enough.
+enum Origin : u8 {
+ Malloc = 0,
+ New = 1,
+ NewArray = 2,
+ Memalign = 3,
+};
+
+enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };
+
+typedef u64 PackedHeader;
+// Update the 'Mask' constants to reflect changes in this structure.
+struct UnpackedHeader {
+ u64 Checksum : 16;
+ u64 ClassId : 8;
+ u64 SizeOrUnusedBytes : 20;
+ u8 State : 2;
+ u8 Origin : 2;
+ u64 Offset : 16;
+};
+typedef atomic_u64 AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+// Those constants are required to silence some -Werror=conversion errors when
+// assigning values to the related bitfield variables.
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
+constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
+constexpr uptr StateMask = (1UL << 2) - 1;
+constexpr uptr OriginMask = (1UL << 2) - 1;
+constexpr uptr OffsetMask = (1UL << 16) - 1;
+
+constexpr uptr getHeaderSize() {
+ return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+ return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize());
+}
+
+INLINE
+const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+ return reinterpret_cast<const AtomicPackedHeader *>(
+ reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+}
+
+// We do not need a cryptographically strong hash for the checksum, but a CRC
+// type function that can alert us in the event a header is invalid or
+// corrupted. Ideally slightly better than a simple xor of all fields.
+static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+ UnpackedHeader *Header) {
+ UnpackedHeader ZeroChecksumHeader = *Header;
+ ZeroChecksumHeader.Checksum = 0;
+ uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+ memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+ return computeChecksum(Cookie, reinterpret_cast<uptr>(Ptr), HeaderHolder,
+ ARRAY_SIZE(HeaderHolder));
+}
+
+INLINE void storeHeader(u32 Cookie, void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ NewUnpackedHeader->Checksum =
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+}
+
+INLINE
+void loadHeader(u32 Cookie, const void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ if (UNLIKELY(NewUnpackedHeader->Checksum !=
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader)))
+ reportHeaderCorruption(const_cast<void *>(Ptr));
+}
+
+INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+ UnpackedHeader *NewUnpackedHeader,
+ UnpackedHeader *OldUnpackedHeader) {
+ NewUnpackedHeader->Checksum =
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+ if (UNLIKELY(!atomic_compare_exchange_strong(
+ getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
+ memory_order_relaxed)))
+ reportHeaderRace(Ptr);
+}
+
+INLINE
+bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ return NewUnpackedHeader->Checksum ==
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+}
+
+} // namespace Chunk
+
+} // namespace scudo
+
+#endif // SCUDO_CHUNK_H_
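
[Editor's note] The bitfields of UnpackedHeader add up to exactly 64 bits (16 + 8 + 20 + 2 + 2 + 16), which is what the COMPILER_CHECK against PackedHeader enforces; the packed form lives in an atomic_u64 immediately before the user pointer. A sketch of a typical state transition using the helpers above; the function is hypothetical, mirroring what combined.h does when quarantining:

void markQuarantined(scudo::u32 Cookie, void *Ptr) {
  scudo::Chunk::UnpackedHeader Header;
  // Dies via reportHeaderCorruption() if the checksum does not match.
  scudo::Chunk::loadHeader(Cookie, Ptr, &Header);
  scudo::Chunk::UnpackedHeader NewHeader = Header;
  NewHeader.State = scudo::Chunk::State::Quarantined;
  // Recomputes the checksum and CASes the packed header in; dies via
  // reportHeaderRace() if another thread won the race.
  scudo::Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, &Header);
}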
diff --git a/lib/scudo/standalone/combined.h b/lib/scudo/standalone/combined.h
new file mode 100644
index 000000000000..4c1c1196bf8f
--- /dev/null
+++ b/lib/scudo/standalone/combined.h
@@ -0,0 +1,557 @@
+//===-- combined.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMBINED_H_
+#define SCUDO_COMBINED_H_
+
+#include "chunk.h"
+#include "common.h"
+#include "flags.h"
+#include "flags_parser.h"
+#include "interface.h"
+#include "local_cache.h"
+#include "quarantine.h"
+#include "report.h"
+#include "secondary.h"
+#include "tsd.h"
+
+namespace scudo {
+
+template <class Params> class Allocator {
+public:
+ using PrimaryT = typename Params::Primary;
+ using CacheT = typename PrimaryT::CacheT;
+ typedef Allocator<Params> ThisT;
+ typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+
+ struct QuarantineCallback {
+ explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
+ : Allocator(Instance), Cache(LocalCache) {}
+
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
+ void recycle(void *Ptr) {
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+ if (UNLIKELY(Header.State != Chunk::State::Quarantined))
+ reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
+
+ Chunk::UnpackedHeader NewHeader = Header;
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+
+ void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
+ const uptr ClassId = Header.ClassId;
+ if (ClassId)
+ Cache.deallocate(ClassId, BlockBegin);
+ else
+ Allocator.Secondary.deallocate(BlockBegin);
+ }
+
+ // We take a shortcut when allocating a quarantine batch by working with the
+ // appropriate class ID instead of using Size. The compiler should optimize
+ // the class ID computation and work with the associated cache directly.
+ void *allocate(UNUSED uptr Size) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ void *Ptr = Cache.allocate(QuarantineClassId);
+ // Quarantine batch allocation failure is fatal.
+ if (UNLIKELY(!Ptr))
+ reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
+
+ Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
+ Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header = {};
+ Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
+ Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
+ Header.State = Chunk::State::Allocated;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+
+ return Ptr;
+ }
+
+ void deallocate(void *Ptr) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ DCHECK_EQ(Header.ClassId, QuarantineClassId);
+ DCHECK_EQ(Header.Offset, 0);
+ DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
+
+ Chunk::UnpackedHeader NewHeader = Header;
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ Cache.deallocate(QuarantineClassId,
+ reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ Chunk::getHeaderSize()));
+ }
+
+ private:
+ ThisT &Allocator;
+ CacheT &Cache;
+ };
+
+ typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
+ typedef typename QuarantineT::CacheT QuarantineCacheT;
+
+ void initLinkerInitialized() {
+ performSanityChecks();
+
+ // Check if hardware CRC32 is supported in the binary and by the platform,
+ // if so, opt for the CRC32 hardware version of the checksum.
+ if (&computeHardwareCRC32 && hasHardwareCRC32())
+ HashAlgorithm = Checksum::HardwareCRC32;
+
+ if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
+ Cookie = static_cast<u32>(getMonotonicTime() ^
+ (reinterpret_cast<uptr>(this) >> 4));
+
+ initFlags();
+ reportUnrecognizedFlags();
+
+ // Store some flags locally.
+ Options.MayReturnNull = getFlags()->may_return_null;
+ Options.ZeroContents = getFlags()->zero_contents;
+ Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
+ Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
+ Options.QuarantineMaxChunkSize = getFlags()->quarantine_max_chunk_size;
+
+ Stats.initLinkerInitialized();
+ Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
+ Secondary.initLinkerInitialized(&Stats);
+
+ Quarantine.init(getFlags()->quarantine_size_kb << 10,
+ getFlags()->thread_local_quarantine_size_kb << 10);
+ }
+
+ void reset() { memset(this, 0, sizeof(*this)); }
+
+ void unmapTestOnly() {
+ TSDRegistry.unmapTestOnly();
+ Primary.unmapTestOnly();
+ }
+
+ TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+
+ void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
+
+ // Release the resources used by a TSD, which involves:
+ // - draining the local quarantine cache to the global quarantine;
+ // - releasing the cached pointers back to the Primary;
+ // - unlinking the local stats from the global ones (destroying the cache does
+ // the last two items).
+ void commitBack(TSD<ThisT> *TSD) {
+ Quarantine.drain(&TSD->QuarantineCache,
+ QuarantineCallback(*this, TSD->Cache));
+ TSD->Cache.destroy(&Stats);
+ }
+
+ NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
+ uptr Alignment = MinAlignment,
+ bool ZeroContents = false) {
+ initThreadMaybe();
+
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportAlignmentTooBig(Alignment, MaxAlignment);
+ }
+ if (UNLIKELY(Alignment < MinAlignment))
+ Alignment = MinAlignment;
+
+ // If the requested size happens to be 0 (more common than you might think),
+ // allocate 1 byte on top of the header. Then add the extra bytes required
+ // to fulfill the alignment requirements: we allocate enough to be sure that
+ // there will be an address in the block that will satisfy the alignment.
+ const uptr NeededSize =
+ Chunk::getHeaderSize() + roundUpTo(Size ? Size : 1, MinAlignment) +
+ ((Alignment > MinAlignment) ? (Alignment - Chunk::getHeaderSize()) : 0);
+
+ // Takes care of extravagantly large sizes as well as integer overflows.
+ if (UNLIKELY(Size >= MaxAllowedMallocSize ||
+ NeededSize >= MaxAllowedMallocSize)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
+ }
+
+ void *Block;
+ uptr ClassId;
+ uptr BlockEnd = 0;
+ if (PrimaryT::canAllocate(NeededSize)) {
+ ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ Block = TSD->Cache.allocate(ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ ClassId = 0;
+ Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
+ }
+
+ if (UNLIKELY(!Block)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportOutOfMemory(NeededSize);
+ }
+
+ // We only need to zero the contents for Primary backed allocations.
+ if ((ZeroContents || Options.ZeroContents) && ClassId)
+ memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
+
+ Chunk::UnpackedHeader Header = {};
+ uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+ // The following condition isn't necessarily "UNLIKELY".
+ if (!isAligned(UserPtr, Alignment)) {
+ const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
+ const uptr Offset = AlignedUserPtr - UserPtr;
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+ DCHECK_GT(Offset, 2 * sizeof(u32));
+ // The BlockMarker has no security purpose, but is specifically meant for
+ // the chunk iteration function that can be used in debugging situations.
+ // It is the only situation where we have to locate the start of a chunk
+ // based on its block address.
+ reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+ reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+ UserPtr = AlignedUserPtr;
+ }
+ Header.State = Chunk::State::Allocated;
+ Header.Origin = Origin & Chunk::OriginMask;
+ if (ClassId) {
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
+ Header.SizeOrUnusedBytes = Size & Chunk::SizeOrUnusedBytesMask;
+ } else {
+ Header.SizeOrUnusedBytes =
+ (BlockEnd - (UserPtr + Size)) & Chunk::SizeOrUnusedBytesMask;
+ }
+ void *Ptr = reinterpret_cast<void *>(UserPtr);
+ Chunk::storeHeader(Cookie, Ptr, &Header);
+
+ if (&__scudo_allocate_hook)
+ __scudo_allocate_hook(Ptr, Size);
+
+ return Ptr;
+ }
+
+ NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
+ UNUSED uptr Alignment = MinAlignment) {
+ // For a deallocation, we only ensure minimal initialization, meaning thread
+ // local data will be left uninitialized for now (when using ELF TLS). The
+ // fallback cache will be used instead. This is a workaround for a situation
+ // where the only heap operation performed in a thread would be a free past
+ // the TLS destructors, ending up in initialized thread specific data never
+ // being destroyed properly. Any other heap operation will do a full init.
+ initThreadMaybe(/*MinimalInit=*/true);
+
+ if (&__scudo_deallocate_hook)
+ __scudo_deallocate_hook(Ptr);
+
+ if (UNLIKELY(!Ptr))
+ return;
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
+
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ if (Options.DeallocTypeMismatch) {
+ if (Header.Origin != Origin) {
+ // With the exception of memalign'd chunks, which can still be free'd.
+ if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
+ Origin != Chunk::Origin::Malloc))
+ reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
+ Header.Origin, Origin);
+ }
+ }
+
+ const uptr Size = getSize(Ptr, &Header);
+ if (DeleteSize && Options.DeleteSizeMismatch) {
+ if (UNLIKELY(DeleteSize != Size))
+ reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
+ }
+
+ quarantineOrDeallocateChunk(Ptr, &Header, Size);
+ }
+
+ void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
+ initThreadMaybe();
+
+ // The following cases are handled by the C wrappers.
+ DCHECK_NE(OldPtr, nullptr);
+ DCHECK_NE(NewSize, 0);
+
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
+
+ Chunk::UnpackedHeader OldHeader;
+ Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
+
+ if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
+
+ // Pointer has to be allocated with a malloc-type function. Some
+ // applications think that it is OK to realloc a memalign'ed pointer, which
+ // will trigger this check. It really isn't.
+ if (Options.DeallocTypeMismatch) {
+ if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
+ reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
+ OldHeader.Origin, Chunk::Origin::Malloc);
+ }
+
+ const uptr OldSize = getSize(OldPtr, &OldHeader);
+ // If the new size is identical to the old one, or lower but within an
+ // acceptable range, we just keep the old chunk, and update its header.
+ if (NewSize == OldSize)
+ return OldPtr;
+ if (NewSize < OldSize) {
+ const uptr Delta = OldSize - NewSize;
+ if (Delta < (SizeClassMap::MaxSize / 2)) {
+ Chunk::UnpackedHeader NewHeader = OldHeader;
+ NewHeader.SizeOrUnusedBytes =
+ (OldHeader.ClassId ? NewHeader.SizeOrUnusedBytes - Delta
+ : NewHeader.SizeOrUnusedBytes + Delta) &
+ Chunk::SizeOrUnusedBytesMask;
+ Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
+ return OldPtr;
+ }
+ }
+
+ // Otherwise we allocate a new one, and deallocate the old one. Some
+ // allocators will allocate an even larger chunk (by a fixed factor) to
+ // allow for potential further in-place realloc. The gains of such a trick
+ // are currently unclear.
+ void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
+ if (NewPtr) {
+ memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
+ quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+ }
+ return NewPtr;
+ }
+
+ // TODO(kostyak): while this locks the Primary & Secondary, it still allows
+ // pointers to be fetched from the TSD. We ultimately want to
+ // lock the registry as well. For now, it's good enough.
+ void disable() {
+ initThreadMaybe();
+ Primary.disable();
+ Secondary.disable();
+ }
+
+ void enable() {
+ initThreadMaybe();
+ Secondary.enable();
+ Primary.enable();
+ }
+
+ void printStats() {
+ disable();
+ Primary.printStats();
+ Secondary.printStats();
+ Quarantine.printStats();
+ enable();
+ }
+
+ void releaseToOS() { Primary.releaseToOS(); }
+
+ // Iterate over all chunks and call a callback for all busy chunks located
+ // within the provided memory range. Said callback must not use this allocator
+ // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
+ void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
+ void *Arg) {
+ initThreadMaybe();
+ const uptr From = Base;
+ const uptr To = Base + Size;
+ auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
+ if (Block < From || Block > To)
+ return;
+ uptr ChunkSize;
+ const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
+ if (ChunkBase != InvalidChunk)
+ Callback(ChunkBase, ChunkSize, Arg);
+ };
+ Primary.iterateOverBlocks(Lambda);
+ Secondary.iterateOverBlocks(Lambda);
+ }
+
+ bool canReturnNull() {
+ initThreadMaybe();
+ return Options.MayReturnNull;
+ }
+
+ // TODO(kostyak): implement this as a "backend" to mallopt.
+ bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+
+ // Return the usable size for a given chunk. Technically we lie, as we just
+ // report the actual size of a chunk. This is done to counteract code actively
+ // writing past the end of a chunk (like sqlite3) when the usable size allows
+ // for it, which then forces realloc to copy the usable size of a chunk as
+ // opposed to its actual size.
+ uptr getUsableSize(const void *Ptr) {
+ initThreadMaybe();
+ if (UNLIKELY(!Ptr))
+ return 0;
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+ // Getting the usable size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
+ return getSize(Ptr, &Header);
+ }
+
+ void getStats(StatCounters S) {
+ initThreadMaybe();
+ Stats.get(S);
+ }
+
+private:
+ typedef MapAllocator SecondaryT;
+ typedef typename PrimaryT::SizeClassMap SizeClassMap;
+
+ static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
+ static const uptr MinAlignment = 1UL << MinAlignmentLog;
+ static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
+ static const uptr MaxAllowedMallocSize =
+ FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
+
+ // Constants used by the chunk iteration mechanism.
+ static const u32 BlockMarker = 0x44554353U;
+ static const uptr InvalidChunk = ~static_cast<uptr>(0);
+
+ GlobalStats Stats;
+ TSDRegistryT TSDRegistry;
+ PrimaryT Primary;
+ SecondaryT Secondary;
+ QuarantineT Quarantine;
+
+ u32 Cookie;
+
+ struct {
+ u8 MayReturnNull : 1; // may_return_null
+ u8 ZeroContents : 1; // zero_contents
+ u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
+ u8 DeleteSizeMismatch : 1; // delete_size_mismatch
+ u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
+ } Options;
+
+ // The following might get optimized out by the compiler.
+ NOINLINE void performSanityChecks() {
+ // Verify that the header offset field can hold the maximum offset. In the
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be small. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
+ Chunk::UnpackedHeader Header = {};
+ const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
+ SizeClassMap::MaxSize - MinAlignment);
+ const uptr MaxOffset =
+ (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+ Header.Offset = MaxOffset & Chunk::OffsetMask;
+ if (UNLIKELY(Header.Offset != MaxOffset))
+ reportSanityCheckError("offset");
+
+ // Verify that we can fit the maximum size or amount of unused bytes in the
+ // header. Given that the Secondary fits the allocation to a page, the worst
+ // case scenario happens in the Primary. It will depend on the second to
+ // last and last class sizes, as well as the dynamic base for the Primary.
+ // The following is an over-approximation that works for our needs.
+ const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
+ Header.SizeOrUnusedBytes =
+ MaxSizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+ if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
+ reportSanityCheckError("size (or unused bytes)");
+
+ const uptr LargestClassId = SizeClassMap::LargestClassId;
+ Header.ClassId = LargestClassId;
+ if (UNLIKELY(Header.ClassId != LargestClassId))
+ reportSanityCheckError("class ID");
+ }
+
+ static INLINE void *getBlockBegin(const void *Ptr,
+ Chunk::UnpackedHeader *Header) {
+ return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ Chunk::getHeaderSize() -
+ (Header->Offset << MinAlignmentLog));
+ }
+
+ // Return the size of a chunk as requested during its allocation.
+ INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+ const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+ if (Header->ClassId)
+ return SizeOrUnusedBytes;
+ return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
+ reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ TSDRegistry.initThreadMaybe(this, MinimalInit);
+ }
+
+ void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
+ uptr Size) {
+ Chunk::UnpackedHeader NewHeader = *Header;
+ // If the quarantine is disabled, or if the size of the chunk is 0 or larger
+ // than the maximum allowed, we return the chunk directly to the backend.
+ const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
+ (Size > Options.QuarantineMaxChunkSize);
+ if (BypassQuarantine) {
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
+ const uptr ClassId = NewHeader.ClassId;
+ if (ClassId) {
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ TSD->Cache.deallocate(ClassId, BlockBegin);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ Secondary.deallocate(BlockBegin);
+ }
+ } else {
+ NewHeader.State = Chunk::State::Quarantined;
+ Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ Quarantine.put(&TSD->QuarantineCache,
+ QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+ if (UnlockRequired)
+ TSD->unlock();
+ }
+ }
+
+ // This only cares about valid busy chunks. This might change in the future.
+ uptr getChunkFromBlock(uptr Block, uptr *Size) {
+ u32 Offset = 0;
+ if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
+ Offset = reinterpret_cast<u32 *>(Block)[1];
+ const uptr P = Block + Offset + Chunk::getHeaderSize();
+ const void *Ptr = reinterpret_cast<const void *>(P);
+ Chunk::UnpackedHeader Header;
+ if (!Chunk::isValid(Cookie, Ptr, &Header) ||
+ Header.State != Chunk::State::Allocated)
+ return InvalidChunk;
+ if (Size)
+ *Size = getSize(Ptr, &Header);
+ return P;
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_COMBINED_H_
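
[Editor's note] End to end, a consumer instantiates Allocator with one of the configs from allocator_config.h; the C and C++ wrappers later in this import do exactly that. A hypothetical direct use, assuming the instance has been initialized (the TSD registry triggers initialization lazily on first use); this is a sketch, not the wrappers' exact code:

static scudo::Allocator<scudo::DefaultConfig> Instance;

void *myMalloc(scudo::uptr Size) {
  return Instance.allocate(Size, scudo::Chunk::Origin::Malloc);
}

void myFree(void *Ptr) {
  Instance.deallocate(Ptr, scudo::Chunk::Origin::Malloc);
}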
diff --git a/lib/scudo/standalone/common.cc b/lib/scudo/standalone/common.cc
new file mode 100644
index 000000000000..2a26efbb9c89
--- /dev/null
+++ b/lib/scudo/standalone/common.cc
@@ -0,0 +1,32 @@
+//===-- common.cc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+#include "atomic_helpers.h"
+
+namespace scudo {
+
+uptr PageSizeCached;
+uptr getPageSize();
+
+uptr getPageSizeSlow() {
+ PageSizeCached = getPageSize();
+ CHECK_NE(PageSizeCached, 0);
+ return PageSizeCached;
+}
+
+// Fatal internal map() or unmap() error (potentially OOM related).
+void NORETURN dieOnMapUnmapError(bool OutOfMemory) {
+ outputRaw("Scudo ERROR: internal map or unmap failure");
+ if (OutOfMemory)
+ outputRaw(" (OOM)");
+ outputRaw("\n");
+ die();
+}
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/common.h b/lib/scudo/standalone/common.h
new file mode 100644
index 000000000000..c015d1ca5669
--- /dev/null
+++ b/lib/scudo/standalone/common.h
@@ -0,0 +1,176 @@
+//===-- common.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMMON_H_
+#define SCUDO_COMMON_H_
+
+#include "internal_defs.h"
+
+#include "fuchsia.h"
+#include "linux.h"
+
+#include <stddef.h>
+#include <string.h>
+
+namespace scudo {
+
+template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
+ COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+ Dest D;
+ memcpy(&D, &S, sizeof(D));
+ return D;
+}
+
+INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+ return (X + Boundary - 1) & ~(Boundary - 1);
+}
+
+INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+ return X & ~(Boundary - 1);
+}
+
+INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+ return (X & (Alignment - 1)) == 0;
+}
+
+template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
+
+template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }
+
+template <class T> void Swap(T &A, T &B) {
+ T Tmp = A;
+ A = B;
+ B = Tmp;
+}
+
+INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
+}
+
+INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+ DCHECK(Size);
+ if (isPowerOfTwo(Size))
+ return Size;
+ const uptr Up = getMostSignificantSetBitIndex(Size);
+ DCHECK_LT(Size, (1UL << (Up + 1)));
+ DCHECK_GT(Size, (1UL << Up));
+ return 1UL << (Up + 1);
+}
+
+INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return static_cast<uptr>(__builtin_ctzl(X));
+}
+
+INLINE uptr getLog2(uptr X) {
+ DCHECK(isPowerOfTwo(X));
+ return getLeastSignificantSetBitIndex(X);
+}
+
+INLINE u32 getRandomU32(u32 *State) {
+ // ANSI C linear congruential PRNG (16-bit output).
+ // return (*State = *State * 1103515245 + 12345) >> 16;
+ // XorShift (32-bit output).
+ *State ^= *State << 13;
+ *State ^= *State >> 17;
+ *State ^= *State << 5;
+ return *State;
+}
+
+INLINE u32 getRandomModN(u32 *State, u32 N) {
+ return getRandomU32(State) % N; // [0, N)
+}
+
+template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+ if (N <= 1)
+ return;
+ u32 State = *RandState;
+ for (u32 I = N - 1; I > 0; I--)
+ Swap(A[I], A[getRandomModN(&State, I + 1)]);
+ *RandState = State;
+}
+
+// Hardware specific inlinable functions.
+
+INLINE void yieldProcessor(u8 Count) {
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__("" ::: "memory");
+ for (u8 I = 0; I < Count; I++)
+ __asm__ __volatile__("pause");
+#elif defined(__aarch64__) || defined(__arm__)
+ __asm__ __volatile__("" ::: "memory");
+ for (u8 I = 0; I < Count; I++)
+ __asm__ __volatile__("yield");
+#endif
+ __asm__ __volatile__("" ::: "memory");
+}
+
+// Platform specific functions.
+
+extern uptr PageSizeCached;
+uptr getPageSizeSlow();
+INLINE uptr getPageSizeCached() {
+ // Bionic uses a hardcoded value.
+ if (SCUDO_ANDROID)
+ return 4096U;
+ if (LIKELY(PageSizeCached))
+ return PageSizeCached;
+ return getPageSizeSlow();
+}
+
+u32 getNumberOfCPUs();
+
+const char *getEnv(const char *Name);
+
+u64 getMonotonicTime();
+
+// Our randomness gathering function is limited to 256 bytes to ensure we get
+// as many bytes as requested, and avoid interruptions (on Linux).
+constexpr uptr MaxRandomLength = 256U;
+bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
+
+// Platform memory mapping functions.
+
+#define MAP_ALLOWNOMEM (1U << 0)
+#define MAP_NOACCESS (1U << 1)
+#define MAP_RESIZABLE (1U << 2)
+
+// Our platform memory mapping use is restricted to 3 scenarios:
+// - reserve memory at a random address (MAP_NOACCESS);
+// - commit memory in a previously reserved space;
+// - commit memory at a random address.
+// As such, only a subset of parameter combinations is valid, which is checked
+// by the function implementation. The Data parameter allows passing opaque
+// platform specific data to the function.
+// Returns nullptr on error or dies if MAP_ALLOWNOMEM is not specified.
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
+
+// Indicates that we are getting rid of the whole mapping, which might have
+// further consequences on Data, depending on the platform.
+#define UNMAP_ALL (1U << 0)
+
+void unmap(void *Addr, uptr Size, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data = nullptr);
+
+// Internal map & unmap fatal error. This must not call map().
+void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
+
+// Logging related functions.
+
+void setAbortMessage(const char *Message);
+
+} // namespace scudo
+
+#endif // SCUDO_COMMON_H_
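
[Editor's note] A few worked values for the bit helpers above, plus the xorshift PRNG and Fisher-Yates shuffle in action; illustrative only:

// roundUpTo(13, 16) == 16, roundDownTo(13, 16) == 0, isAligned(32, 16) == true
// getMostSignificantSetBitIndex(33) == 5, roundUpToPowerOfTwo(33) == 64
// getLog2(64) == 6
void shuffleExample() {
  scudo::u32 State = 42U; // Any non-zero seed keeps the xorshift going.
  scudo::u32 Perm[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  scudo::shuffle(Perm, 8, &State); // Fisher-Yates using getRandomModN.
}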
diff --git a/lib/scudo/standalone/crc32_hw.cc b/lib/scudo/standalone/crc32_hw.cc
new file mode 100644
index 000000000000..f4dae7b5fea8
--- /dev/null
+++ b/lib/scudo/standalone/crc32_hw.cc
@@ -0,0 +1,19 @@
+//===-- crc32_hw.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+
+namespace scudo {
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+ return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
+}
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/flags.cc b/lib/scudo/standalone/flags.cc
new file mode 100644
index 000000000000..21144f211102
--- /dev/null
+++ b/lib/scudo/standalone/flags.cc
@@ -0,0 +1,57 @@
+//===-- flags.cc ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags.h"
+#include "common.h"
+#include "flags_parser.h"
+#include "interface.h"
+
+namespace scudo {
+
+Flags *getFlags() {
+ static Flags F;
+ return &F;
+}
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+void registerFlags(FlagParser *Parser, Flags *F) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+ Parser->registerFlag(#Name, Description, FlagType::FT_##Type, \
+ reinterpret_cast<void *>(&F->Name));
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+ return STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+ return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
+
+void initFlags() {
+ Flags *F = getFlags();
+ F->setDefaults();
+ FlagParser Parser;
+ registerFlags(&Parser, F);
+ Parser.parseString(getCompileDefinitionScudoDefaultOptions());
+ Parser.parseString(getScudoDefaultOptions());
+ Parser.parseString(getEnv("SCUDO_OPTIONS"));
+}
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/flags.h b/lib/scudo/standalone/flags.h
new file mode 100644
index 000000000000..edd39a1b8ba9
--- /dev/null
+++ b/lib/scudo/standalone/flags.h
@@ -0,0 +1,30 @@
+//===-- flags.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "flags.inc"
+#undef SCUDO_FLAG
+ void setDefaults();
+};
+
+Flags *getFlags();
+void initFlags();
+class FlagParser;
+void registerFlags(FlagParser *Parser, Flags *F);
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_H_
diff --git a/lib/scudo/standalone/flags.inc b/lib/scudo/standalone/flags.inc
new file mode 100644
index 000000000000..25b86e14fa94
--- /dev/null
+++ b/lib/scudo/standalone/flags.inc
@@ -0,0 +1,50 @@
+//===-- flags.inc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+#error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, quarantine_size_kb, 0,
+ "Size (in kilobytes) of quarantine used to delay the actual "
+ "deallocation of chunks. Lower value may reduce memory usage but "
+ "decrease the effectiveness of the mitigation.")
+
+SCUDO_FLAG(int, thread_local_quarantine_size_kb, 0,
+ "Size (in kilobytes) of per-thread cache used to offload the global "
+ "quarantine. Lower value may reduce memory usage but might increase "
+ "the contention on the global quarantine.")
+
+SCUDO_FLAG(int, quarantine_max_chunk_size, 0,
+ "Size (in bytes) up to which chunks will be quarantined (if lower "
+ "than or equal to).")
+
+SCUDO_FLAG(bool, dealloc_type_mismatch, false,
+ "Terminate on a type mismatch in allocation-deallocation functions, "
+ "eg: malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, delete_size_mismatch, true,
+ "Terminate on a size mismatch between a sized-delete and the actual "
+ "size of a chunk (as provided to new/new[]).")
+
+SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")
+
+SCUDO_FLAG(int, rss_limit_mb, -1,
+ "Enforce an upper limit (in megabytes) to the process RSS. The "
+ "allocator will terminate or return NULL when allocations are "
+ "attempted past that limit (depending on may_return_null). Negative "
+ "values disable the feature.")
+
+SCUDO_FLAG(bool, may_return_null, true,
+ "Indicate whether the allocator should terminate instead of "
+ "returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
+ "invalid allocation alignments, etc.")
+
+SCUDO_FLAG(int, release_to_os_interval_ms, 5000,
+ "Interval (in milliseconds) at which to attempt release of unused "
+ "memory to the OS. Negative values disable the feature.")
diff --git a/lib/scudo/standalone/flags_parser.cc b/lib/scudo/standalone/flags_parser.cc
new file mode 100644
index 000000000000..5f1253f58d52
--- /dev/null
+++ b/lib/scudo/standalone/flags_parser.cc
@@ -0,0 +1,164 @@
+//===-- flags_parser.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags_parser.h"
+#include "common.h"
+#include "report.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+class UnknownFlagsRegistry {
+ static const u32 MaxUnknownFlags = 16;
+ const char *UnknownFlagsNames[MaxUnknownFlags];
+ u32 NumberOfUnknownFlags;
+
+public:
+ void add(const char *Name) {
+ CHECK_LT(NumberOfUnknownFlags, MaxUnknownFlags);
+ UnknownFlagsNames[NumberOfUnknownFlags++] = Name;
+ }
+
+ void report() {
+ if (!NumberOfUnknownFlags)
+ return;
+ Printf("Scudo WARNING: found %d unrecognized flag(s):\n",
+ NumberOfUnknownFlags);
+ for (u32 I = 0; I < NumberOfUnknownFlags; ++I)
+ Printf(" %s\n", UnknownFlagsNames[I]);
+ NumberOfUnknownFlags = 0;
+ }
+};
+static UnknownFlagsRegistry UnknownFlags;
+
+void reportUnrecognizedFlags() { UnknownFlags.report(); }
+
+void FlagParser::printFlagDescriptions() {
+ Printf("Available flags for Scudo:\n");
+ for (u32 I = 0; I < NumberOfFlags; ++I)
+ Printf("\t%s\n\t\t- %s\n", Flags[I].Name, Flags[I].Desc);
+}
+
+static bool isSeparator(char C) {
+ return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
+ C == '\r';
+}
+
+static bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }
+
+void FlagParser::skipWhitespace() {
+ while (isSeparator(Buffer[Pos]))
+ ++Pos;
+}
+
+void FlagParser::parseFlag() {
+ const uptr NameStart = Pos;
+ while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ if (Buffer[Pos] != '=')
+ reportError("expected '='");
+ const char *Name = Buffer + NameStart;
+ const uptr ValueStart = ++Pos;
+ const char *Value;
+ if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
+ const char Quote = Buffer[Pos++];
+ while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
+ ++Pos;
+ if (Buffer[Pos] == 0)
+ reportError("unterminated string");
+ Value = Buffer + ValueStart + 1;
+ ++Pos; // consume the closing quote
+ } else {
+ while (!isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ Value = Buffer + ValueStart;
+ }
+ if (!runHandler(Name, Value))
+ reportError("flag parsing failed.");
+}
+
+void FlagParser::parseFlags() {
+ while (true) {
+ skipWhitespace();
+ if (Buffer[Pos] == 0)
+ break;
+ parseFlag();
+ }
+}
+
+void FlagParser::parseString(const char *S) {
+ if (!S)
+ return;
+ // Backup current parser state to allow nested parseString() calls.
+ const char *OldBuffer = Buffer;
+ const uptr OldPos = Pos;
+ Buffer = S;
+ Pos = 0;
+
+ parseFlags();
+
+ Buffer = OldBuffer;
+ Pos = OldPos;
+}
+
+INLINE bool parseBool(const char *Value, bool *B) {
+  if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
+      strncmp(Value, "false", 5) == 0) {
+    *B = false;
+    return true;
+  }
+  if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
+      strncmp(Value, "true", 4) == 0) {
+    *B = true;
+    return true;
+  }
+  return false;
+}
+
+bool FlagParser::runHandler(const char *Name, const char *Value) {
+ for (u32 I = 0; I < NumberOfFlags; ++I) {
+ const uptr Len = strlen(Flags[I].Name);
+ if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != '=')
+ continue;
+ bool Ok = false;
+ switch (Flags[I].Type) {
+ case FlagType::FT_bool:
+ Ok = parseBool(Value, reinterpret_cast<bool *>(Flags[I].Var));
+ if (!Ok)
+ reportInvalidFlag("bool", Value);
+ break;
+ case FlagType::FT_int:
+ char *ValueEnd;
+ *reinterpret_cast<int *>(Flags[I].Var) =
+ static_cast<int>(strtol(Value, &ValueEnd, 10));
+ Ok =
+ *ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
+ if (!Ok)
+ reportInvalidFlag("int", Value);
+ break;
+ }
+ return Ok;
+ }
+  // Unrecognized flag. This is not a fatal error; we may print a warning later.
+ UnknownFlags.add(Name);
+ return true;
+}
+
+void FlagParser::registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var) {
+ CHECK_LT(NumberOfFlags, MaxFlags);
+ Flags[NumberOfFlags].Name = Name;
+ Flags[NumberOfFlags].Desc = Desc;
+ Flags[NumberOfFlags].Type = Type;
+ Flags[NumberOfFlags].Var = Var;
+ ++NumberOfFlags;
+}
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/flags_parser.h b/lib/scudo/standalone/flags_parser.h
new file mode 100644
index 000000000000..857b50e880ec
--- /dev/null
+++ b/lib/scudo/standalone/flags_parser.h
@@ -0,0 +1,55 @@
+//===-- flags_parser.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_PARSER_H_
+#define SCUDO_FLAGS_PARSER_H_
+
+#include "report.h"
+#include "string_utils.h"
+
+#include <stddef.h>
+
+namespace scudo {
+
+enum class FlagType : u8 {
+ FT_bool,
+ FT_int,
+};
+
+class FlagParser {
+public:
+ void registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var);
+ void parseString(const char *S);
+ void printFlagDescriptions();
+
+private:
+ static const u32 MaxFlags = 12;
+ struct Flag {
+ const char *Name;
+ const char *Desc;
+ FlagType Type;
+ void *Var;
+ } Flags[MaxFlags];
+
+ u32 NumberOfFlags = 0;
+ const char *Buffer = nullptr;
+ uptr Pos = 0;
+
+ void reportFatalError(const char *Error);
+ void skipWhitespace();
+ void parseFlags();
+ void parseFlag();
+ bool runHandler(const char *Name, const char *Value);
+};
+
+void reportUnrecognizedFlags();
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_PARSER_H_
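A short usage sketch for the parser above; the flag name and backing variable are illustrative:

bool ZeroContents = false;
scudo::FlagParser Parser;
Parser.registerFlag("zero_contents", "Zero chunk contents on allocation.",
                    scudo::FlagType::FT_bool, &ZeroContents);
// Separators can be spaces, commas, colons, tabs or newlines.
Parser.parseString("zero_contents=1:not_a_flag=3");
// ZeroContents is now true; "not_a_flag" was stashed and will be printed by
// scudo::reportUnrecognizedFlags().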
diff --git a/lib/scudo/standalone/fuchsia.cc b/lib/scudo/standalone/fuchsia.cc
new file mode 100644
index 000000000000..896d346e7e72
--- /dev/null
+++ b/lib/scudo/standalone/fuchsia.cc
@@ -0,0 +1,189 @@
+//===-- fuchsia.cc ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#include <limits.h> // for PAGE_SIZE
+#include <stdlib.h> // for getenv()
+#include <zircon/compiler.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+uptr getPageSize() { return PAGE_SIZE; }
+
+void NORETURN die() { __builtin_trap(); }
+
+// We zero-initialize the Data parameter of map(); make sure this stays
+// consistent with ZX_HANDLE_INVALID.
+COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+
+static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
+ // Only scenario so far.
+ DCHECK(Data);
+ DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);
+
+ const zx_status_t Status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ Size, &Data->Vmar, &Data->VmarBase);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ return reinterpret_cast<void *>(Data->VmarBase);
+}
+
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+ MapPlatformData *Data) {
+ DCHECK_EQ(Size % PAGE_SIZE, 0);
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+ // For MAP_NOACCESS, just allocate a Vmar and return.
+ if (Flags & MAP_NOACCESS)
+ return allocateVmar(Size, Data, AllowNoMem);
+
+ const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+ CHECK_NE(Vmar, ZX_HANDLE_INVALID);
+
+ zx_status_t Status;
+ zx_handle_t Vmo;
+ uint64_t VmoSize = 0;
+ if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
+ // If a Vmo was specified, it's a resize operation.
+ CHECK(Addr);
+ DCHECK(Flags & MAP_RESIZABLE);
+ Vmo = Data->Vmo;
+ VmoSize = Data->VmoSize;
+ Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ } else {
+ // Otherwise, create a Vmo and set its name.
+ Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
+ }
+
+ uintptr_t P;
+ zx_vm_option_t MapFlags =
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
+ const uint64_t Offset =
+ Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
+ if (Offset)
+ MapFlags |= ZX_VM_SPECIFIC;
+ Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
+  // Only track the Vmo if we intend to resize it; close it otherwise.
+ if (Flags & MAP_RESIZABLE) {
+ DCHECK(Data);
+ DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
+ Data->Vmo = Vmo;
+ } else {
+ CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
+ }
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ if (Data)
+ Data->VmoSize += Size;
+
+ return reinterpret_cast<void *>(P);
+}
+
+void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
+ if (Flags & UNMAP_ALL) {
+ DCHECK_NE(Data, nullptr);
+ const zx_handle_t Vmar = Data->Vmar;
+ DCHECK_NE(Vmar, _zx_vmar_root_self());
+ // Destroying the vmar effectively unmaps the whole mapping.
+ CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
+ CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
+ } else {
+ const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+ const zx_status_t Status =
+ _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
+ if (Status != ZX_OK)
+ dieOnMapUnmapError();
+ }
+ if (Data) {
+ if (Data->Vmo != ZX_HANDLE_INVALID)
+ CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
+ memset(Data, 0, sizeof(*Data));
+ }
+}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data) {
+ DCHECK(Data);
+ DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+ DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
+ const zx_status_t Status =
+ _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
+// because the Fuchsia implementation of sync_mutex_t has clang thread safety
+// annotations. Were we to apply proper capability annotations to the top level
+// HybridMutex class itself, they would not be needed. As it stands, the
+// thread analysis thinks that we are locking the mutex and accidentally leaving
+// it locked on the way out.
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ // Size and alignment must be compatible between both types.
+ return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_lock(&M);
+}
+
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_unlock(&M);
+}
+
+u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
+
+u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
+
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+ COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+ if (!Buffer || !Length || Length > MaxRandomLength)
+ return false;
+ _zx_cprng_draw(Buffer, Length);
+ return true;
+}
+
+void outputRaw(const char *Buffer) {
+ __sanitizer_log_write(Buffer, strlen(Buffer));
+}
+
+void setAbortMessage(UNUSED const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
diff --git a/lib/scudo/standalone/fuchsia.h b/lib/scudo/standalone/fuchsia.h
new file mode 100644
index 000000000000..d6993f892140
--- /dev/null
+++ b/lib/scudo/standalone/fuchsia.h
@@ -0,0 +1,31 @@
+//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FUCHSIA_H_
+#define SCUDO_FUCHSIA_H_
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+
+namespace scudo {
+
+struct MapPlatformData {
+ zx_handle_t Vmar;
+ zx_handle_t Vmo;
+ uintptr_t VmarBase;
+ uint64_t VmoSize;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_FUCHSIA_H_
diff --git a/lib/scudo/standalone/interface.h b/lib/scudo/standalone/interface.h
new file mode 100644
index 000000000000..e2639823f426
--- /dev/null
+++ b/lib/scudo/standalone/interface.h
@@ -0,0 +1,29 @@
+//===-- interface.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_H_
+#define SCUDO_INTERFACE_H_
+
+#include "internal_defs.h"
+
+extern "C" {
+
+WEAK INTERFACE const char *__scudo_default_options();
+
+// Post-allocation & pre-deallocation hooks.
+// They must be thread-safe and not use heap related functions.
+WEAK INTERFACE void __scudo_allocate_hook(void *ptr, size_t size);
+WEAK INTERFACE void __scudo_deallocate_hook(void *ptr);
+
+WEAK INTERFACE void __scudo_print_stats(void);
+
+typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
+
+} // extern "C"
+
+#endif // SCUDO_INTERFACE_H_
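Because these functions are declared WEAK, a client binary may provide its own definitions. A sketch of overriding the default options hook; the option string is illustrative and uses flags defined in flags.inc:

// In client code, not in the allocator itself.
extern "C" const char *__scudo_default_options() {
  return "quarantine_size_kb=64:thread_local_quarantine_size_kb=16";
}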
diff --git a/lib/scudo/standalone/internal_defs.h b/lib/scudo/standalone/internal_defs.h
new file mode 100644
index 000000000000..901eac372b36
--- /dev/null
+++ b/lib/scudo/standalone/internal_defs.h
@@ -0,0 +1,135 @@
+//===-- internal_defs.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERNAL_DEFS_H_
+#define SCUDO_INTERNAL_DEFS_H_
+
+#include "platform.h"
+
+#include <stdint.h>
+
+#ifndef SCUDO_DEBUG
+#define SCUDO_DEBUG 0
+#endif
+
+#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
+
+// String related macros.
+
+#define STRINGIFY_(S) #S
+#define STRINGIFY(S) STRINGIFY_(S)
+#define CONCATENATE_(S, C) S##C
+#define CONCATENATE(S, C) CONCATENATE_(S, C)
+
+// Attributes & builtins related macros.
+
+#define INTERFACE __attribute__((visibility("default")))
+#define WEAK __attribute__((weak))
+#define INLINE inline
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define ALIAS(X) __attribute__((alias(X)))
+// Please only use the ALIGNED macro before the type. Using ALIGNED after the
+// variable declaration is not portable.
+#define ALIGNED(X) __attribute__((aligned(X)))
+#define FORMAT(F, A) __attribute__((format(printf, F, A)))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define THREADLOCAL __thread
+#define LIKELY(X) __builtin_expect(!!(X), 1)
+#define UNLIKELY(X) __builtin_expect(!!(X), 0)
+#if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(X) generates prefetcht0 on x86; use the NTA variant.
+#define PREFETCH(X) __asm__("prefetchnta (%0)" : : "r"(X))
+#else
+#define PREFETCH(X) __builtin_prefetch(X)
+#endif
+#define UNUSED __attribute__((unused))
+#define USED __attribute__((used))
+#define NOEXCEPT noexcept
+
+namespace scudo {
+
+typedef unsigned long uptr;
+typedef signed long sptr;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef signed char s8;
+typedef signed short s16;
+typedef signed int s32;
+typedef signed long long s64;
+
+// The following two functions have platform specific implementations.
+void outputRaw(const char *Buffer);
+void NORETURN die();
+
+#define RAW_CHECK_MSG(Expr, Msg) \
+ do { \
+ if (UNLIKELY(!(Expr))) { \
+ outputRaw(Msg); \
+ die(); \
+ } \
+ } while (false)
+
+#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)
+
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2);
+
+#define CHECK_IMPL(C1, Op, C2) \
+ do { \
+ u64 V1 = (u64)(C1); \
+ u64 V2 = (u64)(C2); \
+ if (UNLIKELY(!(V1 Op V2))) { \
+ reportCheckFailed(__FILE__, __LINE__, "(" #C1 ") " #Op " (" #C2 ")", V1, \
+ V2); \
+ die(); \
+ } \
+ } while (false)
+
+#define CHECK(A) CHECK_IMPL((A), !=, 0)
+#define CHECK_EQ(A, B) CHECK_IMPL((A), ==, (B))
+#define CHECK_NE(A, B) CHECK_IMPL((A), !=, (B))
+#define CHECK_LT(A, B) CHECK_IMPL((A), <, (B))
+#define CHECK_LE(A, B) CHECK_IMPL((A), <=, (B))
+#define CHECK_GT(A, B) CHECK_IMPL((A), >, (B))
+#define CHECK_GE(A, B) CHECK_IMPL((A), >=, (B))
+
+#if SCUDO_DEBUG
+#define DCHECK(A) CHECK(A)
+#define DCHECK_EQ(A, B) CHECK_EQ(A, B)
+#define DCHECK_NE(A, B) CHECK_NE(A, B)
+#define DCHECK_LT(A, B) CHECK_LT(A, B)
+#define DCHECK_LE(A, B) CHECK_LE(A, B)
+#define DCHECK_GT(A, B) CHECK_GT(A, B)
+#define DCHECK_GE(A, B) CHECK_GE(A, B)
+#else
+#define DCHECK(A)
+#define DCHECK_EQ(A, B)
+#define DCHECK_NE(A, B)
+#define DCHECK_LT(A, B)
+#define DCHECK_LE(A, B)
+#define DCHECK_GT(A, B)
+#define DCHECK_GE(A, B)
+#endif
+
+// The superfluous die() call effectively makes this macro NORETURN.
+#define UNREACHABLE(Msg) \
+ do { \
+ CHECK(0 && Msg); \
+ die(); \
+ } while (0)
+
+#define COMPILER_CHECK(Pred) static_assert(Pred, "")
+
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+
+} // namespace scudo
+
+#endif // SCUDO_INTERNAL_DEFS_H_
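An illustration of the check macros above: both operands are widened to u64 before comparison, the failure path reports and die()s, and the DCHECK variants compile to nothing unless SCUDO_DEBUG is non-zero:

scudo::uptr Alignment = 16;
CHECK_EQ(Alignment & (Alignment - 1), 0); // power-of-two test, always compiled in
DCHECK_GE(Alignment, 8);                  // debug builds only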
diff --git a/lib/scudo/standalone/linux.cc b/lib/scudo/standalone/linux.cc
new file mode 100644
index 000000000000..049477bba8b0
--- /dev/null
+++ b/lib/scudo/standalone/linux.cc
@@ -0,0 +1,171 @@
+//===-- linux.cc ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "linux.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
+
+void NORETURN die() { abort(); }
+
+void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ int MmapFlags = MAP_PRIVATE | MAP_ANON;
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
+ MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
+ if (Addr) {
+ // Currently no scenario for a noaccess mapping with a fixed address.
+ DCHECK_EQ(Flags & MAP_NOACCESS, 0);
+ MmapFlags |= MAP_FIXED;
+ }
+ void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
+ if (P == MAP_FAILED) {
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ dieOnMapUnmapError(errno == ENOMEM);
+ return nullptr;
+ }
+#if SCUDO_ANDROID
+ if (!(Flags & MAP_NOACCESS))
+ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#endif
+ return P;
+}
+
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ if (munmap(Addr, Size) != 0)
+ dieOnMapUnmapError();
+}
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ UNUSED MapPlatformData *Data) {
+ void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
+ while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ }
+}
+
+// Calling getenv should be fine (c)(tm) at any time.
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+ return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
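+// M cycles through three states: Unlocked, Locked (uncontended), and Sleeping
+// (a waiter may be parked in the kernel). lockSlow() advertises contention by
+// storing Sleeping before futex-waiting; unlock() only issues a FUTEX_WAKE
+// when the previous value was not the uncontended Locked.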
+void HybridMutex::lockSlow() {
+ u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+ if (V == Unlocked)
+ return;
+ if (V != Sleeping)
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ while (V != Unlocked) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+ nullptr, nullptr, 0);
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ }
+}
+
+void HybridMutex::unlock() {
+ if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+ atomic_store(&M, Unlocked, memory_order_release);
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+ nullptr, nullptr, 0);
+ }
+}
+
+u64 getMonotonicTime() {
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+}
+
+u32 getNumberOfCPUs() {
+ cpu_set_t CPUs;
+ CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
+ return static_cast<u32>(CPU_COUNT(&CPUs));
+}
+
+// Blocking is possibly unused if the getrandom block is not compiled in.
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+ if (!Buffer || !Length || Length > MaxRandomLength)
+ return false;
+ ssize_t ReadBytes;
+#if defined(SYS_getrandom)
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+ // Up to 256 bytes, getrandom will not be interrupted.
+ ReadBytes =
+ syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
+ if (ReadBytes == static_cast<ssize_t>(Length))
+ return true;
+#endif // defined(SYS_getrandom)
+ // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+ // Blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
+ const int FileDesc = open("/dev/urandom", O_RDONLY);
+ if (FileDesc == -1)
+ return false;
+ ReadBytes = read(FileDesc, Buffer, Length);
+ close(FileDesc);
+ return (ReadBytes == static_cast<ssize_t>(Length));
+}
+
+void outputRaw(const char *Buffer) {
+ static HybridMutex Mutex;
+ ScopedLock L(Mutex);
+ write(2, Buffer, strlen(Buffer));
+}
+
+extern "C" WEAK void android_set_abort_message(const char *);
+
+void setAbortMessage(const char *Message) {
+ if (&android_set_abort_message)
+ android_set_abort_message(Message);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/lib/scudo/standalone/linux.h b/lib/scudo/standalone/linux.h
new file mode 100644
index 000000000000..92c9eb5e97ee
--- /dev/null
+++ b/lib/scudo/standalone/linux.h
@@ -0,0 +1,70 @@
+//===-- linux.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LINUX_H_
+#define SCUDO_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+namespace scudo {
+
+// MapPlatformData is unused on Linux; define it as a minimally sized structure.
+struct MapPlatformData {};
+
+#if SCUDO_ANDROID
+
+#if defined(__aarch64__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__arm__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__i386__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("movl %%gs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__x86_64__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mov %%fs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for sanitizers starting
+// with Q, given that Android currently doesn't support ELF TLS. It is used to
+// store sanitizer thread specific data.
+static const int TLS_SLOT_SANITIZER = 8; // TODO(kostyak): 6 for Q!!
+
+ALWAYS_INLINE uptr *getAndroidTlsPtr() {
+ return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
+}
+
+#endif // SCUDO_ANDROID
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_LINUX_H_
diff --git a/lib/scudo/standalone/list.h b/lib/scudo/standalone/list.h
new file mode 100644
index 000000000000..139e73eff5ad
--- /dev/null
+++ b/lib/scudo/standalone/list.h
@@ -0,0 +1,156 @@
+//===-- list.h --------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LIST_H_
+#define SCUDO_LIST_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Intrusive POD singly-linked list.
+// An object with all zero fields should represent a valid empty list. clear()
+// must be called on all non-zero-initialized objects before use.
+template <class Item> struct IntrusiveList {
+ friend class Iterator;
+
+ void clear() {
+ First = Last = nullptr;
+ Size = 0;
+ }
+
+ bool empty() const { return Size == 0; }
+ uptr size() const { return Size; }
+
+ void push_back(Item *X) {
+ if (empty()) {
+ X->Next = nullptr;
+ First = Last = X;
+ Size = 1;
+ } else {
+ X->Next = nullptr;
+ Last->Next = X;
+ Last = X;
+ Size++;
+ }
+ }
+
+ void push_front(Item *X) {
+ if (empty()) {
+ X->Next = nullptr;
+ First = Last = X;
+ Size = 1;
+ } else {
+ X->Next = First;
+ First = X;
+ Size++;
+ }
+ }
+
+ void pop_front() {
+ DCHECK(!empty());
+ First = First->Next;
+ if (!First)
+ Last = nullptr;
+ Size--;
+ }
+
+ void extract(Item *Prev, Item *X) {
+ DCHECK(!empty());
+ DCHECK_NE(Prev, nullptr);
+ DCHECK_NE(X, nullptr);
+ DCHECK_EQ(Prev->Next, X);
+ Prev->Next = X->Next;
+ if (Last == X)
+ Last = Prev;
+ Size--;
+ }
+
+ Item *front() { return First; }
+ const Item *front() const { return First; }
+ Item *back() { return Last; }
+ const Item *back() const { return Last; }
+
+ void append_front(IntrusiveList<Item> *L) {
+ DCHECK_NE(this, L);
+ if (L->empty())
+ return;
+    if (empty()) {
+      *this = *L;
+    } else {
+      L->Last->Next = First;
+      First = L->First;
+      Size += L->size();
+    }
+ L->clear();
+ }
+
+ void append_back(IntrusiveList<Item> *L) {
+ DCHECK_NE(this, L);
+ if (L->empty())
+ return;
+ if (empty()) {
+ *this = *L;
+ } else {
+ Last->Next = L->First;
+ Last = L->Last;
+ Size += L->size();
+ }
+ L->clear();
+ }
+
+ void checkConsistency() {
+ if (Size == 0) {
+ CHECK_EQ(First, 0);
+ CHECK_EQ(Last, 0);
+ } else {
+      uptr Count = 0;
+      for (Item *I = First;; I = I->Next) {
+        Count++;
+        if (I == Last)
+          break;
+      }
+      CHECK_EQ(size(), Count);
+ CHECK_EQ(Last->Next, 0);
+ }
+ }
+
+ template <class ItemT> class IteratorBase {
+ public:
+ explicit IteratorBase(ItemT *CurrentItem) : Current(CurrentItem) {}
+ IteratorBase &operator++() {
+ Current = Current->Next;
+ return *this;
+ }
+ bool operator!=(IteratorBase Other) const {
+ return Current != Other.Current;
+ }
+ ItemT &operator*() { return *Current; }
+
+ private:
+ ItemT *Current;
+ };
+
+ typedef IteratorBase<Item> Iterator;
+ typedef IteratorBase<const Item> ConstIterator;
+
+ Iterator begin() { return Iterator(First); }
+ Iterator end() { return Iterator(nullptr); }
+
+ ConstIterator begin() const { return ConstIterator(First); }
+ ConstIterator end() const { return ConstIterator(nullptr); }
+
+private:
+ uptr Size;
+ Item *First;
+ Item *Last;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LIST_H_
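Since the list is intrusive it allocates nothing itself: every Item type must expose a Next pointer, as TransferBatch does in local_cache.h. A usage sketch with an illustrative item type:

struct Node { // illustrative; any POD with a Next field works
  Node *Next;
  int Payload;
};

scudo::IntrusiveList<Node> List;
List.clear(); // mandatory unless the list object was zero-initialized
Node A = {nullptr, 1};
Node B = {nullptr, 2};
List.push_back(&A);
List.push_front(&B);
for (Node &N : List) // visits B then A, via the begin()/end() iterators
  consume(N.Payload); // consume() is hypothetical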
diff --git a/lib/scudo/standalone/local_cache.h b/lib/scudo/standalone/local_cache.h
new file mode 100644
index 000000000000..2acc28874015
--- /dev/null
+++ b/lib/scudo/standalone/local_cache.h
@@ -0,0 +1,181 @@
+//===-- local_cache.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LOCAL_CACHE_H_
+#define SCUDO_LOCAL_CACHE_H_
+
+#include "internal_defs.h"
+#include "report.h"
+#include "stats.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
+ typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+
+ struct TransferBatch {
+ static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+ void setFromArray(void **Array, u32 N) {
+ DCHECK_LE(N, MaxNumCached);
+ for (u32 I = 0; I < N; I++)
+ Batch[I] = Array[I];
+ Count = N;
+ }
+ void clear() { Count = 0; }
+ void add(void *P) {
+ DCHECK_LT(Count, MaxNumCached);
+ Batch[Count++] = P;
+ }
+ void copyToArray(void **Array) const {
+ for (u32 I = 0; I < Count; I++)
+ Array[I] = Batch[I];
+ }
+ u32 getCount() const { return Count; }
+ void *get(u32 I) const {
+      DCHECK_LT(I, Count);
+ return Batch[I];
+ }
+ static u32 getMaxCached(uptr Size) {
+ return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
+ }
+ TransferBatch *Next;
+
+ private:
+ u32 Count;
+ void *Batch[MaxNumCached];
+ };
+
+ void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
+ Stats.initLinkerInitialized();
+ if (S)
+ S->link(&Stats);
+ Allocator = A;
+ }
+
+ void init(GlobalStats *S, SizeClassAllocator *A) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(S, A);
+ }
+
+ void destroy(GlobalStats *S) {
+ drain();
+ if (S)
+ S->unlink(&Stats);
+ }
+
+ void *allocate(uptr ClassId) {
+ CHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+ if (C->Count == 0) {
+ if (UNLIKELY(!refill(C, ClassId)))
+ return nullptr;
+ DCHECK_GT(C->Count, 0);
+ }
+ // We read ClassSize first before accessing Chunks because it's adjacent to
+ // Count, while Chunks might be further off (depending on Count). That keeps
+ // the memory accesses in close quarters.
+ const uptr ClassSize = C->ClassSize;
+ void *P = C->Chunks[--C->Count];
+ // The jury is still out as to whether any kind of PREFETCH here increases
+ // performance. It definitely decreases performance on Android though.
+ // if (!SCUDO_ANDROID) PREFETCH(P);
+ Stats.add(StatAllocated, ClassSize);
+ return P;
+ }
+
+ void deallocate(uptr ClassId, void *P) {
+ CHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+ // We still have to initialize the cache in the event that the first heap
+ // operation in a thread is a deallocation.
+ initCacheMaybe(C);
+ if (C->Count == C->MaxCount)
+ drain(C, ClassId);
+ // See comment in allocate() about memory accesses.
+ const uptr ClassSize = C->ClassSize;
+ C->Chunks[C->Count++] = P;
+ Stats.sub(StatAllocated, ClassSize);
+ }
+
+ void drain() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ PerClass *C = &PerClassArray[I];
+ while (C->Count > 0)
+ drain(C, I);
+ }
+ }
+
+ TransferBatch *createBatch(uptr ClassId, void *B) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ B = allocate(SizeClassMap::BatchClassId);
+ return reinterpret_cast<TransferBatch *>(B);
+ }
+
+ LocalStats &getStats() { return Stats; }
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ struct PerClass {
+ u32 Count;
+ u32 MaxCount;
+ uptr ClassSize;
+ void *Chunks[2 * TransferBatch::MaxNumCached];
+ };
+ PerClass PerClassArray[NumClasses];
+ LocalStats Stats;
+ SizeClassAllocator *Allocator;
+
+ ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
+ if (LIKELY(C->MaxCount))
+ return;
+ initCache();
+ DCHECK_NE(C->MaxCount, 0U);
+ }
+
+ NOINLINE void initCache() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ PerClass *P = &PerClassArray[I];
+ const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+ P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
+ P->ClassSize = Size;
+ }
+ }
+
+ void destroyBatch(uptr ClassId, void *B) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ deallocate(SizeClassMap::BatchClassId, B);
+ }
+
+ NOINLINE bool refill(PerClass *C, uptr ClassId) {
+ initCacheMaybe(C);
+ TransferBatch *B = Allocator->popBatch(this, ClassId);
+ if (UNLIKELY(!B))
+ return false;
+ DCHECK_GT(B->getCount(), 0);
+ B->copyToArray(C->Chunks);
+ C->Count = B->getCount();
+ destroyBatch(ClassId, B);
+ return true;
+ }
+
+ NOINLINE void drain(PerClass *C, uptr ClassId) {
+ const u32 Count = Min(C->MaxCount / 2, C->Count);
+ const uptr FirstIndexToDrain = C->Count - Count;
+ TransferBatch *B = createBatch(ClassId, C->Chunks[FirstIndexToDrain]);
+ if (UNLIKELY(!B))
+ reportOutOfMemory(
+ SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
+ B->setFromArray(&C->Chunks[FirstIndexToDrain], Count);
+ C->Count -= Count;
+ Allocator->pushBatch(ClassId, B);
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LOCAL_CACHE_H_
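A lifecycle sketch for the cache above; Primary, PrimaryInstance, Stats and ClassId are stand-ins for a concrete size class allocator type, its instance, a GlobalStats instance and a valid class index:

scudo::SizeClassAllocatorLocalCache<Primary> Cache;
Cache.init(&Stats, &PrimaryInstance);
void *P = Cache.allocate(ClassId);   // refills from the Primary when empty
Cache.deallocate(ClassId, P);        // drains half the chunks when full
Cache.destroy(&Stats);               // drains everything back to the Primary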
diff --git a/lib/scudo/standalone/mutex.h b/lib/scudo/standalone/mutex.h
new file mode 100644
index 000000000000..b6dc9188d347
--- /dev/null
+++ b/lib/scudo/standalone/mutex.h
@@ -0,0 +1,73 @@
+//===-- mutex.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MUTEX_H_
+#define SCUDO_MUTEX_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
+namespace scudo {
+
+class HybridMutex {
+public:
+ void init() { memset(this, 0, sizeof(*this)); }
+ bool tryLock();
+ NOINLINE void lock() {
+ if (tryLock())
+ return;
+ // The compiler may try to fully unroll the loop, ending up in a
+ // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+ // is large, ugly and unneeded, a compact loop is better for our purpose
+ // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+ for (u8 I = 0U; I < NumberOfTries; I++) {
+ yieldProcessor(NumberOfYields);
+ if (tryLock())
+ return;
+ }
+ lockSlow();
+ }
+ void unlock();
+
+private:
+ static constexpr u8 NumberOfTries = 10U;
+ static constexpr u8 NumberOfYields = 10U;
+
+#if SCUDO_LINUX
+ atomic_u32 M;
+#elif SCUDO_FUCHSIA
+ sync_mutex_t M;
+#endif
+
+ void lockSlow();
+};
+
+class ScopedLock {
+public:
+ explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() { Mutex.unlock(); }
+
+private:
+ HybridMutex &Mutex;
+
+ ScopedLock(const ScopedLock &) = delete;
+ void operator=(const ScopedLock &) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MUTEX_H_
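lock() spins for up to NumberOfTries rounds of NumberOfYields processor yields before blocking in lockSlow(); ScopedLock is the matching RAII guard. A usage sketch:

scudo::HybridMutex M; // zero-initialization is valid, or call M.init()
{
  scudo::ScopedLock L(M); // locks here
  // ... critical section ...
} // unlocks when L goes out of scope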
diff --git a/lib/scudo/standalone/platform.h b/lib/scudo/standalone/platform.h
new file mode 100644
index 000000000000..a897a566f9bf
--- /dev/null
+++ b/lib/scudo/standalone/platform.h
@@ -0,0 +1,70 @@
+//===-- platform.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+#if defined(__linux__)
+#define SCUDO_LINUX 1
+#else
+#define SCUDO_LINUX 0
+#endif
+
+#if defined(__ANDROID__)
+#define SCUDO_ANDROID 1
+#else
+#define SCUDO_ANDROID 0
+#endif
+
+#if defined(__Fuchsia__)
+#define SCUDO_FUCHSIA 1
+#else
+#define SCUDO_FUCHSIA 0
+#endif
+
+#if __LP64__
+#define SCUDO_WORDSIZE 64U
+#else
+#define SCUDO_WORDSIZE 32U
+#endif
+
+#if SCUDO_WORDSIZE == 64U
+#define FIRST_32_SECOND_64(a, b) (b)
+#else
+#define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#ifndef SCUDO_CAN_USE_PRIMARY64
+#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
+#endif
+
+#ifndef SCUDO_MIN_ALIGNMENT_LOG
+// We force malloc-type functions to be aligned to std::max_align_t, but there
+// is no reason why the minimum alignment for all other functions can't be 8
+// bytes. Except obviously for applications making incorrect assumptions.
+// TODO(kostyak): define SCUDO_MIN_ALIGNMENT_LOG 3
+#define SCUDO_MIN_ALIGNMENT_LOG FIRST_32_SECOND_64(3, 4)
+#endif
+
+#if defined(__aarch64__)
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
+#else
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// Older gcc versions have issues aligning to a constexpr, and require an
+// integer. See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define SCUDO_CACHE_LINE_SIZE 128
+#else
+#define SCUDO_CACHE_LINE_SIZE 64
+#endif
+
+#define SCUDO_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+
+#endif // SCUDO_PLATFORM_H_
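A quick illustration of the word-size helpers above; on a 64-bit Linux target, SCUDO_MIN_ALIGNMENT_LOG is 4, i.e. a 16-byte minimum alignment:

const unsigned long MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG; // 8 on 32-bit, 16 on 64-bit
const unsigned long long MmapRange = SCUDO_MMAP_RANGE_SIZE;        // 2^32, 2^47 or 2^48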
diff --git a/lib/scudo/standalone/primary32.h b/lib/scudo/standalone/primary32.h
new file mode 100644
index 000000000000..2b2fa8b3d793
--- /dev/null
+++ b/lib/scudo/standalone/primary32.h
@@ -0,0 +1,401 @@
+//===-- primary32.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY32_H_
+#define SCUDO_PRIMARY32_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "report.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
+//
+// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog byte
+// boundary, and keeps a bytemap of the mappable address space to track the
+// size class each Region is associated with.
+//
+// Mapped regions are split into equally sized Blocks according to the size
+// class they belong to, and the associated pointers are shuffled to prevent
+// any predictable address pattern (the predictability increases with the
+// block size).
+//
+// Regions for size class 0 are special and used to hold TransferBatches,
+// which are used to transfer arrays of pointers from the global size class
+// freelist to the thread-specific freelist for said class, and back.
+//
+// Memory used by this allocator is never unmapped but can be partially
+// reclaimed if the platform allows for it.
+
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
+public:
+ typedef SizeClassMapT SizeClassMap;
+ // Regions should be large enough to hold the largest Block.
+ COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
+ typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef typename CacheT::TransferBatch TransferBatch;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? sizeof(TransferBatch)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ if (SCUDO_FUCHSIA)
+ reportError("SizeClassAllocator32 is not supported on Fuchsia");
+
+ PossibleRegions.initLinkerInitialized();
+ MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
+
+ u32 Seed;
+ if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ Seed =
+ static_cast<u32>(getMonotonicTime() ^
+ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
+ const uptr PageSize = getPageSizeCached();
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ Sci->RandState = getRandomU32(&Seed);
+ // See comment in the 64-bit primary about releasing smaller size classes.
+ Sci->CanRelease = (ReleaseToOsInterval > 0) &&
+ (I != SizeClassMap::BatchClassId) &&
+ (getSizeByClassId(I) >= (PageSize / 32));
+ }
+ ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ }
+ void init(s32 ReleaseToOsInterval) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(ReleaseToOsInterval);
+ }
+
+ void unmapTestOnly() {
+ while (NumberOfStashedRegions > 0)
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
+ // TODO(kostyak): unmap the TransferBatch regions as well.
+ for (uptr I = 0; I < NumRegions; I++)
+ if (PossibleRegions[I])
+ unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
+ PossibleRegions.unmapTestOnly();
+ }
+
+ TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ ScopedLock L(Sci->Mutex);
+ TransferBatch *B = Sci->FreeList.front();
+ if (B)
+ Sci->FreeList.pop_front();
+ else {
+ B = populateFreeList(C, ClassId, Sci);
+ if (UNLIKELY(!B))
+ return nullptr;
+ }
+ DCHECK_GT(B->getCount(), 0);
+ Sci->Stats.PoppedBlocks += B->getCount();
+ return B;
+ }
+
+ void pushBatch(uptr ClassId, TransferBatch *B) {
+ DCHECK_LT(ClassId, NumClasses);
+ DCHECK_GT(B->getCount(), 0);
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ ScopedLock L(Sci->Mutex);
+ Sci->FreeList.push_front(B);
+ Sci->Stats.PushedBlocks += B->getCount();
+ if (Sci->CanRelease)
+ releaseToOSMaybe(Sci, ClassId);
+ }
+
+ void disable() {
+ for (uptr I = 0; I < NumClasses; I++)
+ getSizeClassInfo(I)->Mutex.lock();
+ }
+
+ void enable() {
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+ getSizeClassInfo(I)->Mutex.unlock();
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) {
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+ if (PossibleRegions[I]) {
+ const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
+ const uptr From = I * RegionSize;
+ const uptr To = From + (RegionSize / BlockSize) * BlockSize;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+
+ void printStats() {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ TotalMapped += Sci->AllocatedUser;
+ PoppedBlocks += Sci->Stats.PoppedBlocks;
+ PushedBlocks += Sci->Stats.PushedBlocks;
+ }
+ Printf("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
+ "remains %zu\n",
+ TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
+ for (uptr I = 0; I < NumClasses; I++)
+ printStats(I, 0);
+ }
+
+ void releaseToOS() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ releaseToOSMaybe(Sci, I, /*Force=*/true);
+ }
+ }
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
+#if SCUDO_WORDSIZE == 32U
+ typedef FlatByteMap<NumRegions> ByteMap;
+#else
+ typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap;
+#endif
+
+ struct SizeClassStats {
+ uptr PoppedBlocks;
+ uptr PushedBlocks;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr PushedBlocksAtLastRelease;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
+ HybridMutex Mutex;
+ IntrusiveList<TransferBatch> FreeList;
+ SizeClassStats Stats;
+ bool CanRelease;
+ u32 RandState;
+ uptr AllocatedUser;
+ ReleaseToOsInfo ReleaseInfo;
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+ uptr computeRegionId(uptr Mem) {
+ const uptr Id = Mem >> RegionSizeLog;
+ CHECK_LT(Id, NumRegions);
+ return Id;
+ }
+
+ uptr allocateRegionSlow() {
+ uptr MapSize = 2 * RegionSize;
+ const uptr MapBase = reinterpret_cast<uptr>(
+ map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
+ if (UNLIKELY(!MapBase))
+ return 0;
+ const uptr MapEnd = MapBase + MapSize;
+ uptr Region = MapBase;
+ if (isAligned(Region, RegionSize)) {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions < MaxStashedRegions)
+ RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
+ else
+ MapSize = RegionSize;
+ } else {
+ Region = roundUpTo(MapBase, RegionSize);
+ unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
+ MapSize = RegionSize;
+ }
+ const uptr End = Region + MapSize;
+ if (End != MapEnd)
+ unmap(reinterpret_cast<void *>(End), MapEnd - End);
+ return Region;
+ }
+
+ uptr allocateRegion(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ uptr Region = 0;
+ {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions > 0)
+ Region = RegionsStash[--NumberOfStashedRegions];
+ }
+ if (!Region)
+ Region = allocateRegionSlow();
+ if (LIKELY(Region)) {
+ if (ClassId) {
+ const uptr RegionIndex = computeRegionId(Region);
+ if (RegionIndex < MinRegionIndex)
+ MinRegionIndex = RegionIndex;
+ if (RegionIndex > MaxRegionIndex)
+ MaxRegionIndex = RegionIndex;
+ PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
+ }
+ }
+ return Region;
+ }
+
+ SizeClassInfo *getSizeClassInfo(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ return &SizeClassInfoArray[ClassId];
+ }
+
+ bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
+ TransferBatch **CurrentBatch, u32 MaxCount,
+ void **PointersArray, u32 Count) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(PointersArray, Count, &Sci->RandState);
+ TransferBatch *B = *CurrentBatch;
+ for (uptr I = 0; I < Count; I++) {
+ if (B && B->getCount() == MaxCount) {
+ Sci->FreeList.push_back(B);
+ B = nullptr;
+ }
+ if (!B) {
+ B = C->createBatch(ClassId, PointersArray[I]);
+ if (UNLIKELY(!B))
+ return false;
+ B->clear();
+ }
+ B->add(PointersArray[I]);
+ }
+ *CurrentBatch = B;
+ return true;
+ }
+
+ NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+ SizeClassInfo *Sci) {
+ const uptr Region = allocateRegion(ClassId);
+ if (UNLIKELY(!Region))
+ return nullptr;
+ C->getStats().add(StatMapped, RegionSize);
+ const uptr Size = getSizeByClassId(ClassId);
+ const u32 MaxCount = TransferBatch::getMaxCached(Size);
+ DCHECK_GT(MaxCount, 0);
+ const uptr NumberOfBlocks = RegionSize / Size;
+ DCHECK_GT(NumberOfBlocks, 0);
+ TransferBatch *B = nullptr;
+ constexpr uptr ShuffleArraySize = 48;
+ void *ShuffleArray[ShuffleArraySize];
+ u32 Count = 0;
+ const uptr AllocatedUser = NumberOfBlocks * Size;
+ for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
+ ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+ if (Count == ShuffleArraySize) {
+ if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ Count = 0;
+ }
+ }
+ if (Count) {
+ if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
+ Count)))
+ return nullptr;
+ }
+ DCHECK(B);
+ DCHECK_GT(B->getCount(), 0);
+ Sci->AllocatedUser += AllocatedUser;
+ if (Sci->CanRelease)
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ return B;
+ }
+
+ void printStats(uptr ClassId, uptr Rss) {
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ if (Sci->AllocatedUser == 0)
+ return;
+ const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+ const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
+ Printf(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: %6zu"
+ " avail: %6zu rss: %6zuK\n",
+ ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
+ Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
+ AvailableChunks, Rss >> 10);
+ }
+
+ NOINLINE void releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+ bool Force = false) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr PageSize = getPageSizeCached();
+
+ CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ const uptr N = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+ if (N * BlockSize < PageSize)
+ return; // No chance to release anything.
+ if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
+ BlockSize <
+ PageSize) {
+ return; // Nothing new to release.
+ }
+
+ if (!Force) {
+ const s32 IntervalMs = ReleaseToOsIntervalMs;
+ if (IntervalMs < 0)
+ return;
+ if (Sci->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+ getMonotonicTime()) {
+ return; // Memory was returned recently.
+ }
+ }
+
+ // TODO(kostyak): currently not ideal as we loop over all regions and
+ // iterate multiple times over the same freelist if a ClassId spans multiple
+ // regions. But it will have to do for now.
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
+ if (PossibleRegions[I] == ClassId) {
+ ReleaseRecorder Recorder(I * RegionSize);
+ releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
+ RegionSize / PageSize, BlockSize, &Recorder);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+ Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ }
+ }
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ }
+
+ SizeClassInfo SizeClassInfoArray[NumClasses];
+
+ ByteMap PossibleRegions;
+ // Keep track of the lowest & highest regions allocated to avoid looping
+ // through the whole NumRegions.
+ uptr MinRegionIndex;
+ uptr MaxRegionIndex;
+ s32 ReleaseToOsIntervalMs;
+ // Unless several threads request regions simultaneously from different size
+ // classes, the stash rarely contains more than 1 entry.
+ static constexpr uptr MaxStashedRegions = 4;
+ HybridMutex RegionsStashMutex;
+ uptr NumberOfStashedRegions;
+ uptr RegionsStash[MaxStashedRegions];
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY32_H_
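An instantiation sketch; the size class map and region size log are illustrative (the real configurations live in allocator_config.h):

// 2^18 = 256KB regions; must be >= SizeClassMap::MaxSize per the static check.
typedef scudo::SizeClassAllocator32<scudo::DefaultSizeClassMap, 18U> Primary;
Primary Allocator;
Allocator.init(/*ReleaseToOsInterval=*/1000); // try releasing every second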
diff --git a/lib/scudo/standalone/primary64.h b/lib/scudo/standalone/primary64.h
new file mode 100644
index 000000000000..035182b33ef4
--- /dev/null
+++ b/lib/scudo/standalone/primary64.h
@@ -0,0 +1,381 @@
+//===-- primary64.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY64_H_
+#define SCUDO_PRIMARY64_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
+//
+// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided in
+// Regions, specific to each size class. Note that the base of that mapping is
+// random (based on the platform-specific map() capabilities), and that each
+// Region actually starts at a random offset from its base.
+//
+// Regions are mapped incrementally on demand to fulfill allocation requests,
+// those mappings being split into equally sized Blocks based on the size class
+// they belong to. The Blocks created are shuffled to prevent predictable
+// address patterns (the predictability increases with the size of the Blocks).
+//
+// The 1st Region (for size class 0) holds the TransferBatches. This is a
+// structure used to transfer arrays of available pointers from the size class
+// freelist to the thread-specific freelist, and back.
+//
+// The memory used by this allocator is never unmapped, but can be partially
+// released if the platform allows for it.
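+// For instance, with a hypothetical RegionSizeLog of 30 (1GB Regions) and 32
+// size classes, the Primary reserves 32GB of address space up front as
+// MAP_NOACCESS, and the Region for class N starts at PrimaryBase + (N << 30)
+// plus a small random page offset.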
+
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
+public:
+ typedef SizeClassMapT SizeClassMap;
+ typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef typename CacheT::TransferBatch TransferBatch;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? sizeof(TransferBatch)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ // Reserve the space required for the Primary.
+ PrimaryBase = reinterpret_cast<uptr>(
+ map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));
+
+ RegionInfoArray = reinterpret_cast<RegionInfo *>(
+ map(nullptr, sizeof(RegionInfo) * NumClasses, "scudo:regioninfo"));
+ DCHECK_EQ(reinterpret_cast<uptr>(RegionInfoArray) % SCUDO_CACHE_LINE_SIZE,
+ 0);
+
+ u32 Seed;
+ if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ Seed = static_cast<u32>(getMonotonicTime() ^ (PrimaryBase >> 12));
+ const uptr PageSize = getPageSizeCached();
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+      // The actual start of a region is offset by a random number of pages.
+ Region->RegionBeg =
+ getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
+      // Releasing smaller size classes doesn't necessarily yield a
+      // meaningful RSS impact: there are more blocks per page, they are
+      // randomized around, and thus pages are less likely to be entirely
+      // empty. On top of this, attempting to release those requires more
+      // iterations and memory accesses, which ends up being fairly costly.
+      // The current lower limit is mostly arbitrary and based on empirical
+      // observations.
+ // TODO(kostyak): make the lower limit a runtime option
+ Region->CanRelease = (ReleaseToOsInterval > 0) &&
+ (I != SizeClassMap::BatchClassId) &&
+ (getSizeByClassId(I) >= (PageSize / 32));
+ Region->RandState = getRandomU32(&Seed);
+ }
+ ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ }
+ void init(s32 ReleaseToOsInterval) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(ReleaseToOsInterval);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
+ unmap(reinterpret_cast<void *>(RegionInfoArray),
+ sizeof(RegionInfo) * NumClasses);
+ }
+
+ TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ RegionInfo *Region = getRegionInfo(ClassId);
+ ScopedLock L(Region->Mutex);
+ TransferBatch *B = Region->FreeList.front();
+ if (B)
+ Region->FreeList.pop_front();
+ else {
+ B = populateFreeList(C, ClassId, Region);
+ if (UNLIKELY(!B))
+ return nullptr;
+ }
+ DCHECK_GT(B->getCount(), 0);
+ Region->Stats.PoppedBlocks += B->getCount();
+ return B;
+ }
+
+ void pushBatch(uptr ClassId, TransferBatch *B) {
+ DCHECK_GT(B->getCount(), 0);
+ RegionInfo *Region = getRegionInfo(ClassId);
+ ScopedLock L(Region->Mutex);
+ Region->FreeList.push_front(B);
+ Region->Stats.PushedBlocks += B->getCount();
+ if (Region->CanRelease)
+ releaseToOSMaybe(Region, ClassId);
+ }
+
+ void disable() {
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->Mutex.lock();
+ }
+
+ void enable() {
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+ getRegionInfo(I)->Mutex.unlock();
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) const {
+ for (uptr I = 1; I < NumClasses; I++) {
+ const RegionInfo *Region = getRegionInfo(I);
+ const uptr BlockSize = getSizeByClassId(I);
+ const uptr From = Region->RegionBeg;
+ const uptr To = From + Region->AllocatedUser;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+
+ void printStats() const {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ if (Region->MappedUser)
+ TotalMapped += Region->MappedUser;
+ PoppedBlocks += Region->Stats.PoppedBlocks;
+ PushedBlocks += Region->Stats.PushedBlocks;
+ }
+ Printf("Stats: Primary64: %zuM mapped (%zuM rss) in %zu allocations; "
+ "remains %zu\n",
+           TotalMapped >> 20, static_cast<uptr>(0), PoppedBlocks,
+           PoppedBlocks - PushedBlocks);
+
+ for (uptr I = 0; I < NumClasses; I++)
+ printStats(I, 0);
+ }
+
+ void releaseToOS() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L(Region->Mutex);
+ releaseToOSMaybe(Region, I, /*Force=*/true);
+ }
+ }
+
+private:
+ static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr PrimarySize = RegionSize * NumClasses;
+
+ // Call map for user memory with at least this size.
+ static const uptr MapSizeIncrement = 1UL << 16;
+
+ struct RegionStats {
+ uptr PoppedBlocks;
+ uptr PushedBlocks;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr PushedBlocksAtLastRelease;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
+ HybridMutex Mutex;
+ IntrusiveList<TransferBatch> FreeList;
+ RegionStats Stats;
+ bool CanRelease;
+ bool Exhausted;
+ u32 RandState;
+ uptr RegionBeg;
+ uptr MappedUser; // Bytes mapped for user memory.
+ uptr AllocatedUser; // Bytes allocated for user memory.
+ MapPlatformData Data;
+ ReleaseToOsInfo ReleaseInfo;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+ uptr PrimaryBase;
+ RegionInfo *RegionInfoArray;
+ MapPlatformData Data;
+ s32 ReleaseToOsIntervalMs;
+
+ RegionInfo *getRegionInfo(uptr ClassId) const {
+ DCHECK_LT(ClassId, NumClasses);
+ return &RegionInfoArray[ClassId];
+ }
+
+ uptr getRegionBaseByClassId(uptr ClassId) const {
+ return PrimaryBase + (ClassId << RegionSizeLog);
+ }
+
+ bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
+ TransferBatch **CurrentBatch, u32 MaxCount,
+ void **PointersArray, u32 Count) {
+ // The blocks of the batch size class do not need to be shuffled.
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(PointersArray, Count, &Region->RandState);
+ TransferBatch *B = *CurrentBatch;
+ for (uptr I = 0; I < Count; I++) {
+ if (B && B->getCount() == MaxCount) {
+ Region->FreeList.push_back(B);
+ B = nullptr;
+ }
+ if (!B) {
+ B = C->createBatch(ClassId, PointersArray[I]);
+ if (UNLIKELY(!B))
+ return false;
+ B->clear();
+ }
+ B->add(PointersArray[I]);
+ }
+ *CurrentBatch = B;
+ return true;
+ }
+
+ NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+ RegionInfo *Region) {
+ const uptr Size = getSizeByClassId(ClassId);
+ const u32 MaxCount = TransferBatch::getMaxCached(Size);
+
+ const uptr RegionBeg = Region->RegionBeg;
+ const uptr MappedUser = Region->MappedUser;
+ const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
+ // Map more space for blocks, if necessary.
+ if (LIKELY(TotalUserBytes > MappedUser)) {
+ // Do the mmap for the user memory.
+ const uptr UserMapSize =
+ roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+ const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
+ if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
+ if (!Region->Exhausted) {
+ Region->Exhausted = true;
+ printStats();
+ Printf(
+ "Scudo OOM: The process has Exhausted %zuM for size class %zu.\n",
+ RegionSize >> 20, Size);
+ }
+ return nullptr;
+ }
+ if (MappedUser == 0)
+ Region->Data = Data;
+ if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
+ UserMapSize, "scudo:primary",
+ MAP_ALLOWNOMEM | MAP_RESIZABLE, &Region->Data)))
+ return nullptr;
+ Region->MappedUser += UserMapSize;
+ C->getStats().add(StatMapped, UserMapSize);
+ }
+
+ const uptr NumberOfBlocks = Min(
+ 8UL * MaxCount, (Region->MappedUser - Region->AllocatedUser) / Size);
+ DCHECK_GT(NumberOfBlocks, 0);
+
+ TransferBatch *B = nullptr;
+ constexpr uptr ShuffleArraySize = 48;
+ void *ShuffleArray[ShuffleArraySize];
+ u32 Count = 0;
+ const uptr P = RegionBeg + Region->AllocatedUser;
+ const uptr AllocatedUser = NumberOfBlocks * Size;
+ for (uptr I = P; I < P + AllocatedUser; I += Size) {
+ ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+ if (Count == ShuffleArraySize) {
+ if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ Count = 0;
+ }
+ }
+ if (Count) {
+ if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ }
+ DCHECK(B);
+ CHECK_GT(B->getCount(), 0);
+
+ Region->AllocatedUser += AllocatedUser;
+ Region->Exhausted = false;
+ if (Region->CanRelease)
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
+ return B;
+ }
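+
+ // Illustrative walkthrough (the numbers are examples, not fixed values):
+ // for a 32-byte class with MaxCount == 8, a first call computes
+ // TotalUserBytes == 256 and maps roundUpTo(256, MapSizeIncrement) == 64K
+ // of user memory, then carves out Min(8 * MaxCount, mapped space / 32)
+ // == 64 blocks, shuffles them 48 pointers at a time, and links them into
+ // TransferBatches on the Region free list.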
+
+ void printStats(uptr ClassId, uptr Rss) const {
+ RegionInfo *Region = getRegionInfo(ClassId);
+ if (Region->MappedUser == 0)
+ return;
+ const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+ const uptr AvailableChunks =
+ Region->AllocatedUser / getSizeByClassId(ClassId);
+ Printf("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: "
+ "%6zu avail: %6zu rss: %6zuK releases: %6zu last released: %6zuK "
+ "region: 0x%zx (0x%zx)\n",
+ Region->Exhausted ? "F" : " ", ClassId, getSizeByClassId(ClassId),
+ Region->MappedUser >> 10, Region->Stats.PoppedBlocks,
+ Region->Stats.PushedBlocks, InUse, AvailableChunks, Rss >> 10,
+ Region->ReleaseInfo.RangesReleased,
+ Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
+ getRegionBaseByClassId(ClassId));
+ }
+
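+ // Gating sketch (illustrative): releaseToOSMaybe bails out early unless
+ // at least a page worth of blocks is in circulation, at least a page
+ // worth of new deallocations happened since the last release, and (unless
+ // Force is set) ReleaseToOsIntervalMs has elapsed, comparing the interval
+ // converted to nanoseconds against getMonotonicTime().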
+ NOINLINE void releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
+ bool Force = false) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr PageSize = getPageSizeCached();
+
+ CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
+ const uptr N = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+ if (N * BlockSize < PageSize)
+ return; // No chance to release anything.
+ if ((Region->Stats.PushedBlocks -
+ Region->ReleaseInfo.PushedBlocksAtLastRelease) *
+ BlockSize <
+ PageSize) {
+ return; // Nothing new to release.
+ }
+
+ if (!Force) {
+ const s32 IntervalMs = ReleaseToOsIntervalMs;
+ if (IntervalMs < 0)
+ return;
+ if (Region->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+ getMonotonicTime()) {
+ return; // Memory was returned recently.
+ }
+ }
+
+ ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
+ releaseFreeMemoryToOS(&Region->FreeList, Region->RegionBeg,
+ roundUpTo(Region->AllocatedUser, PageSize) / PageSize,
+ BlockSize, &Recorder);
+
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Region->ReleaseInfo.PushedBlocksAtLastRelease =
+ Region->Stats.PushedBlocks;
+ Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY64_H_
diff --git a/lib/scudo/standalone/quarantine.h b/lib/scudo/standalone/quarantine.h
new file mode 100644
index 000000000000..bac36e01c1dd
--- /dev/null
+++ b/lib/scudo/standalone/quarantine.h
@@ -0,0 +1,289 @@
+//===-- quarantine.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_QUARANTINE_H_
+#define SCUDO_QUARANTINE_H_
+
+#include "list.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+struct QuarantineBatch {
+ // With the following count, a batch (and the header that protects it)
+ // occupies 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
+ static const u32 MaxCount = 1019;
+ QuarantineBatch *Next;
+ uptr Size;
+ u32 Count;
+ void *Batch[MaxCount];
+
+ void init(void *Ptr, uptr Size) {
+ Count = 1;
+ Batch[0] = Ptr;
+ this->Size = Size + sizeof(QuarantineBatch); // Account for the batch itself.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
+
+ void push_back(void *Ptr, uptr Size) {
+ DCHECK_LT(Count, MaxCount);
+ Batch[Count++] = Ptr;
+ this->Size += Size;
+ }
+
+ bool canMerge(const QuarantineBatch *const From) const {
+ return Count + From->Count <= MaxCount;
+ }
+
+ void merge(QuarantineBatch *const From) {
+ DCHECK_LE(Count + From->Count, MaxCount);
+ DCHECK_GE(Size, sizeof(QuarantineBatch));
+
+ for (uptr I = 0; I < From->Count; ++I)
+ Batch[Count + I] = From->Batch[I];
+ Count += From->Count;
+ Size += From->getQuarantinedSize();
+
+ From->Count = 0;
+ From->Size = sizeof(QuarantineBatch);
+ }
+
+ void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
+};
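+
+// Layout sketch (assuming a typical LP64 target, for illustration): the
+// metadata (Next, Size, Count plus padding) takes 24 bytes, and the 1019
+// pointers take 8152 bytes, for 8176 bytes total, fitting the 8192-byte
+// bound checked below. A batch initialized with init(Ptr, 100) reports
+// getQuarantinedSize() == 100 while its Size field holds 100 + 8176, so
+// quarantine limits also account for the batches themselves.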
+
+COMPILER_CHECK(sizeof(QuarantineBatch) <= (1U << 13)); // 8KB.
+
+// Per-thread cache of memory blocks.
+template <typename Callback> class QuarantineCache {
+public:
+ void initLinkerInitialized() {}
+ void init() {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized();
+ }
+
+ // Total memory used, including internal accounting.
+ uptr getSize() const { return atomic_load_relaxed(&Size); }
+ // Memory used for internal accounting.
+ uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
+
+ void enqueue(Callback Cb, void *Ptr, uptr Size) {
+ if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
+ QuarantineBatch *B =
+ reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
+ DCHECK(B);
+ B->init(Ptr, Size);
+ enqueueBatch(B);
+ } else {
+ List.back()->push_back(Ptr, Size);
+ addToSize(Size);
+ }
+ }
+
+ void transfer(QuarantineCache *From) {
+ List.append_back(&From->List);
+ addToSize(From->getSize());
+ atomic_store_relaxed(&From->Size, 0);
+ }
+
+ void enqueueBatch(QuarantineBatch *B) {
+ List.push_back(B);
+ addToSize(B->Size);
+ }
+
+ QuarantineBatch *dequeueBatch() {
+ if (List.empty())
+ return nullptr;
+ QuarantineBatch *B = List.front();
+ List.pop_front();
+ subFromSize(B->Size);
+ return B;
+ }
+
+ void mergeBatches(QuarantineCache *ToDeallocate) {
+ uptr ExtractedSize = 0;
+ QuarantineBatch *Current = List.front();
+ while (Current && Current->Next) {
+ if (Current->canMerge(Current->Next)) {
+ QuarantineBatch *Extracted = Current->Next;
+ // Move all the chunks into the current batch.
+ Current->merge(Extracted);
+ DCHECK_EQ(Extracted->Count, 0);
+ DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
+ // Remove the next batch from the list and account for its size.
+ List.extract(Current, Extracted);
+ ExtractedSize += Extracted->Size;
+ // Add it to the deallocation list.
+ ToDeallocate->enqueueBatch(Extracted);
+ } else {
+ Current = Current->Next;
+ }
+ }
+ subFromSize(ExtractedSize);
+ }
+
+ void printStats() const {
+ uptr BatchCount = 0;
+ uptr TotalOverheadBytes = 0;
+ uptr TotalBytes = 0;
+ uptr TotalQuarantineChunks = 0;
+ for (const QuarantineBatch &Batch : List) {
+ BatchCount++;
+ TotalBytes += Batch.Size;
+ TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
+ TotalQuarantineChunks += Batch.Count;
+ }
+ const uptr QuarantineChunksCapacity =
+ BatchCount * QuarantineBatch::MaxCount;
+ const uptr ChunksUsagePercent =
+ (QuarantineChunksCapacity == 0)
+ ? 0
+ : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
+ const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
+ const uptr MemoryOverheadPercent =
+ (TotalQuarantinedBytes == 0)
+ ? 0
+ : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
+ Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
+ "chunks: %zd (capacity: %zd); %zd%% chunks used; %zd%% memory "
+ "overhead\n",
+ BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
+ QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
+ }
+
+private:
+ IntrusiveList<QuarantineBatch> List;
+ atomic_uptr Size;
+
+ void addToSize(uptr Add) { atomic_store_relaxed(&Size, getSize() + Add); }
+ void subFromSize(uptr Sub) { atomic_store_relaxed(&Size, getSize() - Sub); }
+};
+
+// The callback interface is:
+// void Callback::recycle(Node *Ptr);
+// void *Callback::allocate(uptr Size);
+// void Callback::deallocate(void *Ptr);
+template <typename Callback, typename Node> class GlobalQuarantine {
+public:
+ typedef QuarantineCache<Callback> CacheT;
+
+ void initLinkerInitialized(uptr Size, uptr CacheSize) {
+ // Thread local quarantine size can be zero only when global quarantine size
+ // is zero (it allows us to perform just one atomic read per put() call).
+ CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
+
+ atomic_store_relaxed(&MaxSize, Size);
+ atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
+ atomic_store_relaxed(&MaxCacheSize, CacheSize);
+
+ Cache.initLinkerInitialized();
+ }
+ void init(uptr Size, uptr CacheSize) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Size, CacheSize);
+ }
+
+ uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
+ uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
+
+ void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
+ C->enqueue(Cb, Ptr, Size);
+ if (C->getSize() > getCacheSize())
+ drain(C, Cb);
+ }
+
+ void NOINLINE drain(CacheT *C, Callback Cb) {
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ }
+ if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
+ recycle(atomic_load_relaxed(&MinSize), Cb);
+ }
+
+ void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ }
+ RecycleMutex.lock();
+ recycle(0, Cb);
+ }
+
+ void printStats() const {
+ // It assumes that the world is stopped, just as the allocator's printStats.
+ Printf("Quarantine limits: global: %zdM; thread local: %zdK\n",
+ getMaxSize() >> 20, getCacheSize() >> 10);
+ Cache.printStats();
+ }
+
+private:
+ // Read-only data.
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
+ CacheT Cache;
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
+ atomic_uptr MinSize;
+ atomic_uptr MaxSize;
+ alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
+
+ void NOINLINE recycle(uptr MinSize, Callback Cb) {
+ CacheT Tmp;
+ Tmp.init();
+ {
+ ScopedLock L(CacheMutex);
+ // Go over the batches and merge partially filled ones to
+ // save some memory, otherwise the batches themselves (since the memory
+ // they use is counted against the quarantine limit) can end up
+ // outweighing the user's actual quarantined chunks, which defeats the
+ // purpose of the quarantine.
+ const uptr CacheSize = Cache.getSize();
+ const uptr OverheadSize = Cache.getOverheadSize();
+ DCHECK_GE(CacheSize, OverheadSize);
+ // Do the merge only when the overhead exceeds this predefined limit
+ // (which might require some tuning). It saves us a merge attempt when
+ // the batch list is unlikely to contain batches suitable for merging.
+ // With the threshold at 100, merging kicks in once the batch overhead
+ // exceeds the amount of quarantined user bytes.
+ constexpr uptr OverheadThresholdPercents = 100;
+ if (CacheSize > OverheadSize &&
+ OverheadSize * (100 + OverheadThresholdPercents) >
+ CacheSize * OverheadThresholdPercents) {
+ Cache.mergeBatches(&Tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
+ while (Cache.getSize() > MinSize)
+ Tmp.enqueueBatch(Cache.dequeueBatch());
+ }
+ RecycleMutex.unlock();
+ doRecycle(&Tmp, Cb);
+ }
+
+ void NOINLINE doRecycle(CacheT *C, Callback Cb) {
+ while (QuarantineBatch *B = C->dequeueBatch()) {
+ const u32 Seed = static_cast<u32>(
+ (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
+ B->shuffle(Seed);
+ constexpr uptr NumberOfPrefetch = 8UL;
+ CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
+ for (uptr I = 0; I < NumberOfPrefetch; I++)
+ PREFETCH(B->Batch[I]);
+ for (uptr I = 0, Count = B->Count; I < Count; I++) {
+ if (I + NumberOfPrefetch < Count)
+ PREFETCH(B->Batch[I + NumberOfPrefetch]);
+ Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
+ }
+ Cb.deallocate(B);
+ }
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_QUARANTINE_H_
diff --git a/lib/scudo/standalone/release.h b/lib/scudo/standalone/release.h
new file mode 100644
index 000000000000..4fe29fde4bde
--- /dev/null
+++ b/lib/scudo/standalone/release.h
@@ -0,0 +1,262 @@
+//===-- release.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_RELEASE_H_
+#define SCUDO_RELEASE_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+class ReleaseRecorder {
+public:
+ ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
+ : BaseAddress(BaseAddress), Data(Data) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ // Releases [From, To) range of pages back to OS.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ releasePagesToOS(BaseAddress, From, Size, Data);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+ uptr BaseAddress = 0;
+ MapPlatformData *Data = nullptr;
+};
+
+// A packed array of Counters. Each counter occupies 2^N bits, enough to store
+// the counter's MaxValue. The constructor will try to allocate the required
+// Buffer via map() and the caller is expected to check whether the
+// initialization was successful by checking the isAllocated() result. For
+// performance reasons, none of the accessors check the validity of the
+// arguments: it is assumed that Index is always in the [0, N) range and the
+// value is not incremented past MaxValue.
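+// Worked example (illustrative, assuming a 64-bit uptr): for MaxValue == 5,
+// a counter needs 3 significant bits, rounded up to a CounterSizeBits of 4,
+// so CounterMask == 0xf and CounterSizeBitsLog == 2. PackingRatio is then
+// 64 / 4 == 16 counters per uptr (PackingRatioLog == 4, BitOffsetMask ==
+// 0xf): get(I) reads Buffer[I >> 4] and extracts the 4-bit field at bit
+// offset (I & 0xf) << 2.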
+class PackedCounterArray {
+public:
+ PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
+ CHECK_GT(NumCounters, 0);
+ CHECK_GT(MaxValue, 0);
+ constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
+ // Rounding the counter storage size up to a power of two allows for
+ // using bit shifts to calculate a particular counter's index and offset.
+ const uptr CounterSizeBits =
+ roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ CHECK_LE(CounterSizeBits, MaxCounterBits);
+ CounterSizeBitsLog = getLog2(CounterSizeBits);
+ CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
+
+ const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
+ CHECK_GT(PackingRatio, 0);
+ PackingRatioLog = getLog2(PackingRatio);
+ BitOffsetMask = PackingRatio - 1;
+
+ BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
+ PackingRatioLog) *
+ sizeof(*Buffer);
+ Buffer = reinterpret_cast<uptr *>(
+ map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+ }
+ ~PackedCounterArray() {
+ if (isAllocated())
+ unmap(reinterpret_cast<void *>(Buffer), BufferSize);
+ }
+
+ bool isAllocated() const { return !!Buffer; }
+
+ uptr getCount() const { return N; }
+
+ uptr get(uptr I) const {
+ DCHECK_LT(I, N);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ return (Buffer[Index] >> BitOffset) & CounterMask;
+ }
+
+ void inc(uptr I) const {
+ DCHECK_LT(get(I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
+ }
+
+ void incRange(uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ for (uptr I = From; I <= To; I++)
+ inc(I);
+ }
+
+ uptr getBufferSize() const { return BufferSize; }
+
+private:
+ const uptr N;
+ uptr CounterSizeBitsLog;
+ uptr CounterMask;
+ uptr PackingRatioLog;
+ uptr BitOffsetMask;
+
+ uptr BufferSize;
+ uptr *Buffer;
+};
+
+template <class ReleaseRecorderT> class FreePagesRangeTracker {
+public:
+ explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+ : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
+
+ void processNextPage(bool Freed) {
+ if (Freed) {
+ if (!InRange) {
+ CurrentRangeStatePage = CurrentPage;
+ InRange = true;
+ }
+ } else {
+ closeOpenedRange();
+ }
+ CurrentPage++;
+ }
+
+ void finish() { closeOpenedRange(); }
+
+private:
+ void closeOpenedRange() {
+ if (InRange) {
+ Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+ (CurrentPage << PageSizeLog));
+ InRange = false;
+ }
+ }
+
+ ReleaseRecorderT *const Recorder;
+ const uptr PageSizeLog;
+ bool InRange = false;
+ uptr CurrentPage = 0;
+ uptr CurrentRangeStatePage = 0;
+};
+
+template <class TransferBatchT, class ReleaseRecorderT>
+NOINLINE void
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
+ uptr AllocatedPagesCount, uptr BlockSize,
+ ReleaseRecorderT *Recorder) {
+ const uptr PageSize = getPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr FullPagesBlockCountMax;
+ bool SameBlockCountPerPage;
+ if (BlockSize <= PageSize) {
+ if (PageSize % BlockSize == 0) {
+ // Same number of chunks per page, no cross overs.
+ FullPagesBlockCountMax = PageSize / BlockSize;
+ SameBlockCountPerPage = true;
+ } else if (BlockSize % (PageSize % BlockSize) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 2;
+ SameBlockCountPerPage = false;
+ }
+ } else {
+ if (BlockSize % PageSize == 0) {
+ // One chunk covers multiple pages, no cross overs.
+ FullPagesBlockCountMax = 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // One chunk covers multiple pages, and some chunks cross page
+ // boundaries. Some pages contain one chunk, some contain two.
+ FullPagesBlockCountMax = 2;
+ SameBlockCountPerPage = false;
+ }
+ }
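+
+ // Worked example (illustrative): with a 4096-byte page and a 48-byte
+ // block, PageSize % BlockSize == 16 and BlockSize % 16 == 0, so every
+ // page is affected by the same count of up to 4096 / 48 + 1 == 86 blocks
+ // (SameBlockCountPerPage), and the range detection below can simply
+ // compare each page counter against FullPagesBlockCountMax == 86.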
+
+ PackedCounterArray Counters(AllocatedPagesCount, FullPagesBlockCountMax);
+ if (!Counters.isAllocated())
+ return;
+
+ const uptr PageSizeLog = getLog2(PageSize);
+ const uptr End = Base + AllocatedPagesCount * PageSize;
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+ // Each chunk affects one page only.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.inc((P - Base) >> PageSizeLog);
+ }
+ }
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.incRange((P - Base) >> PageSizeLog,
+ (P - Base + BlockSize - 1) >> PageSizeLog);
+ }
+ }
+ }
+
+ // Iterate over the pages, detecting ranges of pages whose chunk counters
+ // equal the expected number of chunks for that page.
+ FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
+ if (SameBlockCountPerPage) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr I = 0; I < Counters.getCount(); I++)
+ RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
+ } else {
+ // Slow path, go through the pages keeping count of how many chunks
+ // affect each page.
+ const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
+ const uptr Pnc = Pn * BlockSize;
+ // The idea is to increment the current page pointer by the first chunk
+ // size, middle portion size (the portion of the page covered by chunks
+ // except the first and the last one) and then the last chunk size, adding
+ // up the number of chunks on the current page and checking on every step
+ // whether the page boundary was crossed.
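+ // Worked example (illustrative): with 48-byte blocks on 4096-byte pages,
+ // Pn == 85 and Pnc == 4080. On the first page, the middle portion ends at
+ // 4080 and one more block straddles into the next page, so the expected
+ // count is 86 and the walk carries CurrentBoundary == 4128 into the
+ // second page.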
+ uptr PrevPageBoundary = 0;
+ uptr CurrentBoundary = 0;
+ for (uptr I = 0; I < Counters.getCount(); I++) {
+ const uptr PageBoundary = PrevPageBoundary + PageSize;
+ uptr BlocksPerPage = Pn;
+ if (CurrentBoundary < PageBoundary) {
+ if (CurrentBoundary > PrevPageBoundary)
+ BlocksPerPage++;
+ CurrentBoundary += Pnc;
+ if (CurrentBoundary < PageBoundary) {
+ BlocksPerPage++;
+ CurrentBoundary += BlockSize;
+ }
+ }
+ PrevPageBoundary = PageBoundary;
+
+ RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
+ }
+ }
+ RangeTracker.finish();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_RELEASE_H_
diff --git a/lib/scudo/standalone/report.cc b/lib/scudo/standalone/report.cc
new file mode 100644
index 000000000000..47cd951e8ed4
--- /dev/null
+++ b/lib/scudo/standalone/report.cc
@@ -0,0 +1,192 @@
+//===-- report.cc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "report.h"
+
+#include "atomic_helpers.h"
+#include "string_utils.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedErrorReport {
+public:
+ ScopedErrorReport() : Message(512) { Message.append("Scudo ERROR: "); }
+ void append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ Message.append(Format, Args);
+ va_end(Args);
+ }
+ NORETURN ~ScopedErrorReport() {
+ outputRaw(Message.data());
+ setAbortMessage(Message.data());
+ die();
+ }
+
+private:
+ ScopedString Message;
+};
+
+INLINE void NORETURN trap() { __builtin_trap(); }
+
+// This could potentially be called recursively if a CHECK fails in the reports.
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2) {
+ static atomic_u32 NumberOfCalls;
+ if (atomic_fetch_add(&NumberOfCalls, 1, memory_order_relaxed) > 2) {
+ // TODO(kostyak): maybe sleep here?
+ trap();
+ }
+ ScopedErrorReport Report;
+ Report.append("CHECK failed @ %s:%d %s (%llu, %llu)\n", File, Line, Condition,
+ Value1, Value2);
+}
+
+// Generic string fatal error message.
+void NORETURN reportError(const char *Message) {
+ ScopedErrorReport Report;
+ Report.append("%s\n", Message);
+}
+
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
+ ScopedErrorReport Report;
+ Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
+}
+
+// The checksum of a chunk header is invalid. This could be caused by an
+// {over,under}write of the header, or a pointer that is not an actual chunk.
+void NORETURN reportHeaderCorruption(void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("corrupted chunk header at address %p\n", Ptr);
+}
+
+// Two threads have attempted to modify a chunk header at the same time. This is
+// symptomatic of a race-condition in the application code, or general lack of
+// proper locking.
+void NORETURN reportHeaderRace(void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("race on chunk header at address %p\n", Ptr);
+}
+
+// The allocator was compiled with parameters that conflict with field size
+// requirements.
+void NORETURN reportSanityCheckError(const char *Field) {
+ ScopedErrorReport Report;
+ Report.append("maximum possible %s doesn't fit in header\n", Field);
+}
+
+// We enforce a maximum alignment, to keep fields smaller and generally prevent
+// integer overflows, or unexpected corner cases.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment) {
+ ScopedErrorReport Report;
+ Report.append("invalid allocation alignment: %zu exceeds maximum supported "
+ "alignment of %zu\n",
+ Alignment, MaxAlignment);
+}
+
+// See above, we also enforce a maximum size.
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize) {
+ ScopedErrorReport Report;
+ Report.append("requested allocation size %zu (%zu after adjustments) exceeds "
+ "maximum supported size of %zu\n",
+ UserSize, TotalSize, MaxSize);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+ ScopedErrorReport Report;
+ Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
+}
+
+static const char *stringifyAction(AllocatorAction Action) {
+ switch (Action) {
+ case AllocatorAction::Recycling:
+ return "recycling";
+ case AllocatorAction::Deallocating:
+ return "deallocating";
+ case AllocatorAction::Reallocating:
+ return "reallocating";
+ case AllocatorAction::Sizing:
+ return "sizing";
+ }
+ return "<invalid action>";
+}
+
+// The chunk is not in a state congruent with the operation we want to perform.
+// This is usually the case with a double-free or a realloc of a freed pointer.
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("invalid chunk state when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("misaligned pointer when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+// The deallocation function used is at odds with the one used to allocate the
+// chunk (eg: new[]/delete or malloc/delete, and so on).
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB) {
+ ScopedErrorReport Report;
+ Report.append("allocation type mismatch when %s address %p (%d vs %d)\n",
+ stringifyAction(Action), Ptr, TypeA, TypeB);
+}
+
+// The size specified to the delete operator does not match the one that was
+// passed to new when allocating the chunk.
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size,
+ uptr ExpectedSize) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid sized delete when deallocating address %p (%zu vs %zu)\n", Ptr,
+ Size, ExpectedSize);
+}
+
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid allocation alignment: %zu, alignment must be a power of two\n",
+ Alignment);
+}
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("calloc parameters overflow: count * size (%zu * %zu) cannot "
+ "be represented with type size_t\n",
+ Count, Size);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid alignment requested in posix_memalign: %zu, alignment must be a "
+ "power of two and a multiple of sizeof(void *) == %zu\n",
+ Alignment, sizeof(void *));
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("pvalloc parameters overflow: size %zu rounded up to system "
+ "page size %zu cannot be represented in type size_t\n",
+ Size, getPageSizeCached());
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("invalid alignment requested in aligned_alloc: %zu, alignment "
+ "must be a power of two and the requested size %zu must be a "
+ "multiple of alignment\n",
+ Alignment, Size);
+}
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/report.h b/lib/scudo/standalone/report.h
new file mode 100644
index 000000000000..14e4e799b736
--- /dev/null
+++ b/lib/scudo/standalone/report.h
@@ -0,0 +1,57 @@
+//===-- report.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_H_
+#define SCUDO_REPORT_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Reports are *fatal* unless stated otherwise.
+
+// Generic error.
+void NORETURN reportError(const char *Message);
+
+// Flags related errors.
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
+
+// Chunk header related errors.
+void NORETURN reportHeaderCorruption(void *Ptr);
+void NORETURN reportHeaderRace(void *Ptr);
+
+// Sanity checks related error.
+void NORETURN reportSanityCheckError(const char *Field);
+
+// Combined allocator errors.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize);
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+enum class AllocatorAction : u8 {
+ Recycling,
+ Deallocating,
+ Reallocating,
+ Sizing,
+};
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr);
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr);
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB);
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size, uptr ExpectedSize);
+
+// C wrappers errors.
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size);
+
+} // namespace scudo
+
+#endif // SCUDO_REPORT_H_
diff --git a/lib/scudo/standalone/secondary.cc b/lib/scudo/standalone/secondary.cc
new file mode 100644
index 000000000000..75f9171f1617
--- /dev/null
+++ b/lib/scudo/standalone/secondary.cc
@@ -0,0 +1,136 @@
+//===-- secondary.cc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "string_utils.h"
+
+namespace scudo {
+
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
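+// Worked example (illustrative numbers): on a 32-bit platform with 4K pages,
+// a request with Size == 80K and AlignmentHint == 64K slides CommitBase up to
+// the next 64K boundary minus one page, trims the spare reservation on both
+// sides, and ends up committing close to Size - AlignmentHint == 16K, plus
+// header and guard-page rounding.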
+void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
+ DCHECK_GT(Size, AlignmentHint);
+ const uptr PageSize = getPageSizeCached();
+ const uptr MapSize =
+ roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
+ MapPlatformData Data = {};
+ uptr MapBase =
+ reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+ MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ if (!MapBase)
+ return nullptr;
+ uptr CommitBase = MapBase + PageSize;
+ uptr MapEnd = MapBase + MapSize;
+
+ // In the unlikely event of alignments larger than a page, adjust the amount
+ // of memory we want to commit, and trim the extra memory.
+ if (AlignmentHint >= PageSize) {
+ // For alignments greater than or equal to a page, the user pointer (eg:
+ // the pointer that is returned by the C or C++ allocation APIs) ends up
+ // on a page boundary, and our headers will live in the preceding page.
+ CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+ const uptr NewMapBase = CommitBase - PageSize;
+ DCHECK_GE(NewMapBase, MapBase);
+ // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+ // are less constrained memory-wise, and that saves us two syscalls.
+ if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+ unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MapBase = NewMapBase;
+ }
+ const uptr NewMapEnd = CommitBase + PageSize +
+ roundUpTo((Size - AlignmentHint), PageSize) +
+ PageSize;
+ DCHECK_LE(NewMapEnd, MapEnd);
+ if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+ unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MapEnd = NewMapEnd;
+ }
+ }
+
+ const uptr CommitSize = MapEnd - PageSize - CommitBase;
+ const uptr Ptr =
+ reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+ CommitSize, "scudo:secondary", 0, &Data));
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+ H->MapBase = MapBase;
+ H->MapSize = MapEnd - MapBase;
+ H->BlockEnd = CommitBase + CommitSize;
+ H->Data = Data;
+ {
+ ScopedLock L(Mutex);
+ if (!Tail) {
+ Tail = H;
+ } else {
+ Tail->Next = H;
+ H->Prev = Tail;
+ Tail = H;
+ }
+ AllocatedBytes += CommitSize;
+ if (LargestSize < CommitSize)
+ LargestSize = CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, CommitSize);
+ Stats.add(StatMapped, H->MapSize);
+ }
+ if (BlockEnd)
+ *BlockEnd = CommitBase + CommitSize;
+ return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+void MapAllocator::deallocate(void *Ptr) {
+ LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+ {
+ ScopedLock L(Mutex);
+ LargeBlock::Header *Prev = H->Prev;
+ LargeBlock::Header *Next = H->Next;
+ if (Prev) {
+ CHECK_EQ(Prev->Next, H);
+ Prev->Next = Next;
+ }
+ if (Next) {
+ CHECK_EQ(Next->Prev, H);
+ Next->Prev = Prev;
+ }
+ if (Tail == H) {
+ CHECK(!Next);
+ Tail = Prev;
+ } else {
+ CHECK(Next);
+ }
+ const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+ FreedBytes += CommitSize;
+ NumberOfFrees++;
+ Stats.sub(StatAllocated, CommitSize);
+ Stats.sub(StatMapped, H->MapSize);
+ }
+ void *Addr = reinterpret_cast<void *>(H->MapBase);
+ const uptr Size = H->MapSize;
+ MapPlatformData Data;
+ Data = H->Data;
+ unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+void MapAllocator::printStats() const {
+ Printf("Stats: MapAllocator: allocated %zd times (%zdK), freed %zd times "
+ "(%zdK), remains %zd (%zdK) max %zdM\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+ NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+ LargestSize >> 20);
+}
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/secondary.h b/lib/scudo/standalone/secondary.h
new file mode 100644
index 000000000000..9124e2a41c6a
--- /dev/null
+++ b/lib/scudo/standalone/secondary.h
@@ -0,0 +1,97 @@
+//===-- secondary.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SECONDARY_H_
+#define SCUDO_SECONDARY_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "stats.h"
+
+namespace scudo {
+
+// This allocator wraps the platform allocation primitives, and as such is on
+// the slower side and should preferably be used for larger sized allocations.
+// Blocks allocated will be preceded and followed by a guard page, and hold
+// their own header that is not checksummed: the guard pages and the Combined
+// header should be enough for our purpose.
+
+namespace LargeBlock {
+
+struct Header {
+ LargeBlock::Header *Prev;
+ LargeBlock::Header *Next;
+ uptr BlockEnd;
+ uptr MapBase;
+ uptr MapSize;
+ MapPlatformData Data;
+};
+
+constexpr uptr getHeaderSize() {
+ return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+
+static Header *getHeader(const void *Ptr) {
+ return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+
+} // namespace LargeBlock
+
+class MapAllocator {
+public:
+ void initLinkerInitialized(GlobalStats *S) {
+ Stats.initLinkerInitialized();
+ if (S)
+ S->link(&Stats);
+ }
+ void init(GlobalStats *S) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(S);
+ }
+
+ void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+
+ void deallocate(void *Ptr);
+
+ static uptr getBlockEnd(void *Ptr) {
+ return LargeBlock::getHeader(Ptr)->BlockEnd;
+ }
+
+ static uptr getBlockSize(void *Ptr) {
+ return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
+ }
+
+ void printStats() const;
+
+ void disable() { Mutex.lock(); }
+
+ void enable() { Mutex.unlock(); }
+
+ template <typename F> void iterateOverBlocks(F Callback) const {
+ for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
+ Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+ }
+
+private:
+ HybridMutex Mutex;
+ LargeBlock::Header *Tail;
+ uptr AllocatedBytes;
+ uptr FreedBytes;
+ uptr LargestSize;
+ u32 NumberOfAllocs;
+ u32 NumberOfFrees;
+ LocalStats Stats;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_SECONDARY_H_
diff --git a/lib/scudo/standalone/size_class_map.h b/lib/scudo/standalone/size_class_map.h
new file mode 100644
index 000000000000..b7df54cf8098
--- /dev/null
+++ b/lib/scudo/standalone/size_class_map.h
@@ -0,0 +1,149 @@
+//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SIZE_CLASS_MAP_H_
+#define SCUDO_SIZE_CLASS_MAP_H_
+
+#include "common.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassMap maps allocation sizes into size classes and back, in an
+// efficient table-free manner.
+//
+// Class 0 is a special class that doesn't abide by the same rules as other
+// classes. The allocator uses it to hold batches.
+//
+// The other sizes are controlled by the template parameters:
+// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
+// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
+// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
+// 2^MidSizeLog bytes.
+// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
+// eg: with NumBits==3 all size classes after 2^MidSizeLog look like
+// 0b1xx0..0 (where x is either 0 or 1).
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that can be cached per thread:
+// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
+// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
+
+template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
+ u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr MinSize = 1UL << MinSizeLog;
+ static const uptr MidSize = 1UL << MidSizeLog;
+ static const uptr MidClass = MidSize / MinSize;
+ static const u8 S = NumBits - 1;
+ static const uptr M = (1UL << S) - 1;
+
+public:
+ static const u32 MaxNumCachedHint = MaxNumCachedHintT;
+
+ static const uptr MaxSize = 1UL << MaxSizeLog;
+ static const uptr NumClasses =
+ MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
+ COMPILER_CHECK(NumClasses <= 256);
+ static const uptr LargestClassId = NumClasses - 1;
+ static const uptr BatchClassId = 0;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ DCHECK_NE(ClassId, BatchClassId);
+ if (ClassId <= MidClass)
+ return ClassId << MinSizeLog;
+ ClassId -= MidClass;
+ const uptr T = MidSize << (ClassId >> S);
+ return T + (T >> S) * (ClassId & M);
+ }
+
+ static uptr getClassIdBySize(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ if (Size <= MidSize)
+ return (Size + MinSize - 1) >> MinSizeLog;
+ const uptr L = getMostSignificantSetBitIndex(Size);
+ const uptr HBits = (Size >> (L - S)) & M;
+ const uptr LBits = Size & ((1UL << (L - S)) - 1);
+ const uptr L1 = L - MidSizeLog;
+ return MidClass + (L1 << S) + HBits + (LBits > 0);
+ }
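+
+ // Worked example (illustrative): DefaultSizeClassMap below uses
+ // NumBits == 3, MinSizeLog == 5 and MidSizeLog == 8, so classes step by
+ // 32 bytes up to 256 (classes 1 to 8), then grow geometrically with 4
+ // steps per power of two: class 9 maps to 320, class 10 to 384, class 11
+ // to 448, class 12 to 512, and getClassIdBySize(320) returns 9.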
+
+ static u32 getMaxCachedHint(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ DCHECK_NE(Size, 0);
+ u32 N;
+ // Force a 32-bit division if the template parameters allow for it.
+ if (MaxBytesCachedLog > 31 || MaxSizeLog > 31)
+ N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size);
+ else
+ N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size);
+ return Max(1U, Min(MaxNumCachedHint, N));
+ }
+
+ static void print() {
+ uptr PrevS = 0;
+ uptr TotalCached = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == BatchClassId)
+ continue;
+ const uptr S = getSizeByClassId(I);
+ if (S >= MidSize / 2 && (S & (S - 1)) == 0)
+ Printf("\n");
+ const uptr D = S - PrevS;
+ const uptr P = PrevS ? (D * 100 / PrevS) : 0;
+ const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
+ const uptr Cached = getMaxCachedHint(S) * S;
+ Printf(
+ "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
+ I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
+ getClassIdBySize(S));
+ TotalCached += Cached;
+ PrevS = S;
+ }
+ Printf("Total Cached: %zu\n", TotalCached);
+ }
+
+ static void validate() {
+ for (uptr C = 0; C < NumClasses; C++) {
+ if (C == BatchClassId)
+ continue;
+ const uptr S = getSizeByClassId(C);
+ CHECK_NE(S, 0U);
+ CHECK_EQ(getClassIdBySize(S), C);
+ if (C < LargestClassId)
+ CHECK_EQ(getClassIdBySize(S + 1), C + 1);
+ CHECK_EQ(getClassIdBySize(S - 1), C);
+ CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
+ }
+ // Do not perform the loop if the maximum size is too large.
+ if (MaxSizeLog > 19)
+ return;
+ for (uptr S = 1; S <= MaxSize; S++) {
+ const uptr C = getClassIdBySize(S);
+ CHECK_LT(C, NumClasses);
+ CHECK_GE(getSizeByClassId(C), S);
+ if (C > 0)
+ CHECK_LT(getSizeByClassId(C - 1), S);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
+
+// TODO(kostyak): further tune class maps for Android & Fuchsia.
+#if SCUDO_WORDSIZE == 64U
+typedef SizeClassMap<3, 5, 8, 15, 8, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 16, 14, 12> AndroidSizeClassMap;
+#else
+typedef SizeClassMap<3, 4, 7, 15, 8, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 4, 7, 16, 14, 12> AndroidSizeClassMap;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_SIZE_CLASS_MAP_H_
diff --git a/lib/scudo/standalone/stats.h b/lib/scudo/standalone/stats.h
new file mode 100644
index 000000000000..12436756226b
--- /dev/null
+++ b/lib/scudo/standalone/stats.h
@@ -0,0 +1,105 @@
+//===-- stats.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STATS_H_
+#define SCUDO_STATS_H_
+
+#include "atomic_helpers.h"
+#include "mutex.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// Memory allocator statistics
+enum StatType { StatAllocated, StatMapped, StatCount };
+
+typedef uptr StatCounters[StatCount];
+
+// Per-thread stats, live in the per-thread cache. We use atomics so that the
+// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
+// lock, because those are expensive operations, and we only care for the
+// stats to be "somewhat" correct: eg: if we call GlobalStats::get while a
+// thread is LocalStats::add'ing, this is OK, we will still get a meaningful
+// number.
+class LocalStats {
+public:
+ void initLinkerInitialized() {}
+ void init() { memset(this, 0, sizeof(*this)); }
+
+ void add(StatType I, uptr V) {
+ V += atomic_load_relaxed(&StatsArray[I]);
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void sub(StatType I, uptr V) {
+ V = atomic_load_relaxed(&StatsArray[I]) - V;
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }
+
+ uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
+
+private:
+ friend class GlobalStats;
+ atomic_uptr StatsArray[StatCount];
+ LocalStats *Next;
+ LocalStats *Prev;
+};
+
+// Global stats, used for aggregation and querying.
+class GlobalStats : public LocalStats {
+public:
+ void initLinkerInitialized() {
+ Next = this;
+ Prev = this;
+ }
+ void init() {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized();
+ }
+
+ void link(LocalStats *S) {
+ ScopedLock L(Mutex);
+ S->Next = Next;
+ S->Prev = this;
+ Next->Prev = S;
+ Next = S;
+ }
+
+ void unlink(LocalStats *S) {
+ ScopedLock L(Mutex);
+ S->Prev->Next = S->Next;
+ S->Next->Prev = S->Prev;
+ for (uptr I = 0; I < StatCount; I++)
+ add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
+ }
+
+ void get(uptr *S) const {
+ memset(S, 0, StatCount * sizeof(uptr));
+ ScopedLock L(Mutex);
+ const LocalStats *Stats = this;
+ for (;;) {
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] += Stats->get(static_cast<StatType>(I));
+ Stats = Stats->Next;
+ if (Stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
+ }
+
+private:
+ mutable HybridMutex Mutex;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_STATS_H_
diff --git a/lib/scudo/standalone/string_utils.cc b/lib/scudo/standalone/string_utils.cc
new file mode 100644
index 000000000000..f0068afc1e8b
--- /dev/null
+++ b/lib/scudo/standalone/string_utils.cc
@@ -0,0 +1,236 @@
+//===-- string_utils.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "string_utils.h"
+#include "common.h"
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <string.h>
+
+namespace scudo {
+
+static int appendChar(char **Buffer, const char *BufferEnd, char C) {
+ if (*Buffer < BufferEnd) {
+ **Buffer = C;
+ (*Buffer)++;
+ }
+ return 1;
+}
+
+// Appends a number in the given Base to the buffer. If its length is less
+// than |MinNumberLength|, it is padded with leading zeroes or spaces,
+// depending on the value of |PadWithZero|.
+static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
+ u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Negative, bool Upper) {
+ constexpr uptr MaxLen = 30;
+ RAW_CHECK(Base == 10 || Base == 16);
+ RAW_CHECK(Base == 10 || !Negative);
+ RAW_CHECK(AbsoluteValue || !Negative);
+ RAW_CHECK(MinNumberLength < MaxLen);
+ int Res = 0;
+ if (Negative && MinNumberLength)
+ --MinNumberLength;
+ if (Negative && PadWithZero)
+ Res += appendChar(Buffer, BufferEnd, '-');
+ uptr NumBuffer[MaxLen];
+ int Pos = 0;
+ do {
+ RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
+ "appendNumber buffer overflow");
+ NumBuffer[Pos++] = AbsoluteValue % Base;
+ AbsoluteValue /= Base;
+ } while (AbsoluteValue > 0);
+ if (Pos < MinNumberLength) {
+ memset(&NumBuffer[Pos], 0,
+ sizeof(NumBuffer[0]) * static_cast<uptr>(MinNumberLength - Pos));
+ Pos = MinNumberLength;
+ }
+ RAW_CHECK(Pos > 0);
+ Pos--;
+ for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
+ char C = (PadWithZero || Pos == 0) ? '0' : ' ';
+ Res += appendChar(Buffer, BufferEnd, C);
+ }
+ if (Negative && !PadWithZero)
+ Res += appendChar(Buffer, BufferEnd, '-');
+ for (; Pos >= 0; Pos--) {
+ char Digit = static_cast<char>(NumBuffer[Pos]);
+ Digit = static_cast<char>((Digit < 10) ? '0' + Digit
+ : (Upper ? 'A' : 'a') + Digit - 10);
+ Res += appendChar(Buffer, BufferEnd, Digit);
+ }
+ return Res;
+}
+
+static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
+ u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Upper) {
+ return appendNumber(Buffer, BufferEnd, Num, Base, MinNumberLength,
+ PadWithZero, /*Negative=*/false, Upper);
+}
+
+static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
+ u8 MinNumberLength, bool PadWithZero) {
+ const bool Negative = (Num < 0);
+ return appendNumber(Buffer, BufferEnd,
+ static_cast<u64>(Negative ? -Num : Num), 10,
+ MinNumberLength, PadWithZero, Negative,
+ /*Upper=*/false);
+}
+
+// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
+// interpret Width == 0 as "no Width requested":
+// Width == 0 - no Width requested
+// Width < 0 - left-justify S and pad it to -Width chars, if necessary
+// Width > 0 - right-justify S, not implemented yet
+static int appendString(char **Buffer, const char *BufferEnd, int Width,
+ int MaxChars, const char *S) {
+ if (!S)
+ S = "<null>";
+ int Res = 0;
+ for (; *S; S++) {
+ if (MaxChars >= 0 && Res >= MaxChars)
+ break;
+ Res += appendChar(Buffer, BufferEnd, *S);
+ }
+ // Only the left justified strings are supported.
+ while (Width < -Res)
+ Res += appendChar(Buffer, BufferEnd, ' ');
+ return Res;
+}
+
+static int appendPointer(char **Buffer, const char *BufferEnd, u64 Value) {
+ int Res = 0;
+ Res += appendString(Buffer, BufferEnd, 0, -1, "0x");
+ Res += appendUnsigned(Buffer, BufferEnd, Value, 16,
+ SCUDO_POINTER_FORMAT_LENGTH, /*PadWithZero=*/true,
+ /*Upper=*/false);
+ return Res;
+}
+
+int formatString(char *Buffer, uptr BufferLength, const char *Format,
+ va_list Args) {
+ UNUSED static const char *PrintfFormatsHelp =
+ "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ RAW_CHECK(Format);
+ RAW_CHECK(BufferLength > 0);
+ const char *BufferEnd = &Buffer[BufferLength - 1];
+ const char *Cur = Format;
+ int Res = 0;
+ for (; *Cur; Cur++) {
+ if (*Cur != '%') {
+ Res += appendChar(&Buffer, BufferEnd, *Cur);
+ continue;
+ }
+ Cur++;
+ const bool LeftJustified = *Cur == '-';
+ if (LeftJustified)
+ Cur++;
+ bool HaveWidth = (*Cur >= '0' && *Cur <= '9');
+ const bool PadWithZero = (*Cur == '0');
+ u8 Width = 0;
+ if (HaveWidth) {
+ while (*Cur >= '0' && *Cur <= '9')
+ Width = static_cast<u8>(Width * 10 + *Cur++ - '0');
+ }
+ const bool HavePrecision = (Cur[0] == '.' && Cur[1] == '*');
+ int Precision = -1;
+ if (HavePrecision) {
+ Cur += 2;
+ Precision = va_arg(Args, int);
+ }
+ const bool HaveZ = (*Cur == 'z');
+ Cur += HaveZ;
+ const bool HaveLL = !HaveZ && (Cur[0] == 'l' && Cur[1] == 'l');
+ Cur += HaveLL * 2;
+ s64 DVal;
+ u64 UVal;
+ const bool HaveLength = HaveZ || HaveLL;
+ const bool HaveFlags = HaveWidth || HaveLength;
+ // At the moment only %s supports precision and left-justification.
+ CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
+ switch (*Cur) {
+ case 'd': {
+ DVal = HaveLL ? va_arg(Args, s64)
+ : HaveZ ? va_arg(Args, sptr) : va_arg(Args, int);
+ Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+ break;
+ }
+ case 'u':
+ case 'x':
+ case 'X': {
+ UVal = HaveLL ? va_arg(Args, u64)
+ : HaveZ ? va_arg(Args, uptr) : va_arg(Args, unsigned);
+ const bool Upper = (*Cur == 'X');
+ Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
+ Width, PadWithZero, Upper);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res += appendPointer(&Buffer, BufferEnd, va_arg(Args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
+ // Only left-justified Width is supported.
+ CHECK(!HaveWidth || LeftJustified);
+ Res += appendString(&Buffer, BufferEnd, LeftJustified ? -Width : Width,
+ Precision, va_arg(Args, char *));
+ break;
+ }
+ case 'c': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res +=
+ appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
+ break;
+ }
+ case '%': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res += appendChar(&Buffer, BufferEnd, '%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, PrintfFormatsHelp);
+ }
+ }
+ }
+ RAW_CHECK(Buffer <= BufferEnd);
+ appendChar(&Buffer, BufferEnd + 1, '\0');
+ return Res;
+}
+
+void ScopedString::append(const char *Format, va_list Args) {
+ CHECK_LT(Length, String.size());
+ formatString(String.data() + Length, String.size() - Length, Format, Args);
+ Length += strlen(String.data() + Length);
+ CHECK_LT(Length, String.size());
+}
+
+FORMAT(2, 3)
+void ScopedString::append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ append(Format, Args);
+ va_end(Args);
+}
+
+FORMAT(1, 2)
+void Printf(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ ScopedString Msg(512);
+ Msg.append(Format, Args);
+ outputRaw(Msg.data());
+ va_end(Args);
+}
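+
+// Usage sketch (hypothetical call sites, mirroring the formats documented in
+// PrintfFormatsHelp above):
+//   Printf("mapped %zuK at 0x%zx\n", Size >> 10, reinterpret_cast<uptr>(P));
+//   Printf("%-16s: %d\n", Name, Value);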
+
+} // namespace scudo
diff --git a/lib/scudo/standalone/string_utils.h b/lib/scudo/standalone/string_utils.h
new file mode 100644
index 000000000000..aea7b3ffd7a5
--- /dev/null
+++ b/lib/scudo/standalone/string_utils.h
@@ -0,0 +1,42 @@
+//===-- string_utils.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STRING_UTILS_H_
+#define SCUDO_STRING_UTILS_H_
+
+#include "internal_defs.h"
+#include "vector.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedString {
+public:
+ explicit ScopedString(uptr MaxLength) : String(MaxLength), Length(0) {
+ String[0] = '\0';
+ }
+ uptr length() { return Length; }
+ const char *data() { return String.data(); }
+ void clear() {
+ String[0] = '\0';
+ Length = 0;
+ }
+ void append(const char *Format, va_list Args);
+ void append(const char *Format, ...);
+
+private:
+ Vector<char> String;
+ uptr Length;
+};
+
+void Printf(const char *Format, ...);
+
+} // namespace scudo
+
+#endif // SCUDO_STRING_UTILS_H_
diff --git a/lib/scudo/standalone/tsd.h b/lib/scudo/standalone/tsd.h
new file mode 100644
index 000000000000..f24ff01960fb
--- /dev/null
+++ b/lib/scudo/standalone/tsd.h
@@ -0,0 +1,66 @@
+//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
+
+// With some build setups, this might still not be defined.
+#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
+#define PTHREAD_DESTRUCTOR_ITERATIONS 4
+#endif
+
+namespace scudo {
+
+template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
+ typename Allocator::CacheT Cache;
+ typename Allocator::QuarantineCacheT QuarantineCache;
+ u8 DestructorIterations;
+
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initCache(&Cache);
+ DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+ INLINE bool tryLock() {
+ if (Mutex.tryLock()) {
+ atomic_store_relaxed(&Precedence, 0);
+ return true;
+ }
+ if (atomic_load_relaxed(&Precedence) == 0)
+ atomic_store_relaxed(
+ &Precedence,
+ static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
+ return false;
+ }
+ INLINE void lock() {
+ atomic_store_relaxed(&Precedence, 0);
+ Mutex.lock();
+ }
+ INLINE void unlock() { Mutex.unlock(); }
+ INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+private:
+ HybridMutex Mutex;
+ atomic_uptr Precedence;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_H_
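
The Precedence field implements a small contention-arbitration scheme: a failed tryLock() stamps a coarse timestamp (only if none is already pending), lock() clears it, and getTSDAndLockSlow() in tsd_shared.h (later in this diff) prefers the TSD with the lowest, i.e. oldest, stamp. A condensed sketch of the same idea using standard primitives instead of scudo's HybridMutex (illustrative only, not the scudo types):

  #include <atomic>
  #include <chrono>
  #include <cstdint>
  #include <mutex>

  struct Slot {
    std::mutex M;
    std::atomic<uint64_t> Precedence{0};

    static uint64_t coarseNow() {
      // Coarse timestamp, mirroring the FIRST_32_SECOND_64(16, 0) shift that
      // the code above applies on 32-bit targets.
      return static_cast<uint64_t>(
          std::chrono::steady_clock::now().time_since_epoch().count()) >> 16;
    }
    bool tryLock() {
      if (M.try_lock()) {
        Precedence.store(0, std::memory_order_relaxed); // No longer contended.
        return true;
      }
      // The first contender stamps the slot; later arbitration picks the slot
      // that has been waited on the longest (lowest non-zero stamp).
      if (Precedence.load(std::memory_order_relaxed) == 0)
        Precedence.store(coarseNow(), std::memory_order_relaxed);
      return false;
    }
  };
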
diff --git a/lib/scudo/standalone/tsd_exclusive.h b/lib/scudo/standalone/tsd_exclusive.h
new file mode 100644
index 000000000000..18cce1c56af8
--- /dev/null
+++ b/lib/scudo/standalone/tsd_exclusive.h
@@ -0,0 +1,118 @@
+//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_EXCLUSIVE_H_
+#define SCUDO_TSD_EXCLUSIVE_H_
+
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+enum class ThreadState : u8 {
+ NotInitialized = 0,
+ Initialized,
+ TornDown,
+};
+
+template <class Allocator> void teardownThread(void *Ptr);
+
+template <class Allocator> struct TSDRegistryExT {
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initLinkerInitialized();
+ CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
+ FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
+ map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
+ FallbackTSD->initLinkerInitialized(Instance);
+ Initialized = true;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
+ if (LIKELY(State != ThreadState::NotInitialized))
+ return;
+ initThread(Instance, MinimalInit);
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ if (LIKELY(State == ThreadState::Initialized)) {
+ *UnlockRequired = false;
+ return &ThreadTSD;
+ }
+ DCHECK(FallbackTSD);
+ FallbackTSD->lock();
+ *UnlockRequired = true;
+ return FallbackTSD;
+ }
+
+private:
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (Initialized)
+ return;
+ initLinkerInitialized(Instance); // Sets Initialized.
+ }
+
+ // Using minimal initialization allows for global initialization while keeping
+ // the thread-specific structure untouched. The fallback structure will be
+ // used instead.
+ NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
+ initOnceMaybe(Instance);
+ if (MinimalInit)
+ return;
+ CHECK_EQ(
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
+ ThreadTSD.initLinkerInitialized(Instance);
+ State = ThreadState::Initialized;
+ }
+
+ pthread_key_t PThreadKey;
+ bool Initialized;
+ TSD<Allocator> *FallbackTSD;
+ HybridMutex Mutex;
+ static THREADLOCAL ThreadState State;
+ static THREADLOCAL TSD<Allocator> ThreadTSD;
+
+ friend void teardownThread<Allocator>(void *Ptr);
+};
+
+template <class Allocator>
+THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
+template <class Allocator>
+THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;
+
+template <class Allocator> void teardownThread(void *Ptr) {
+ typedef TSDRegistryExT<Allocator> TSDRegistryT;
+ Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
+ // The glibc POSIX thread-local-storage deallocation routine calls
+ // user-provided destructors in a loop of up to
+ // PTHREAD_DESTRUCTOR_ITERATIONS passes. We want to be called last since
+ // other destructors might call free and the like, so we wait until the
+ // last iteration before draining the quarantine and swallowing the cache.
+ if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
+ TSDRegistryT::ThreadTSD.DestructorIterations--;
+ // If pthread_setspecific fails, we will go ahead with the teardown.
+ if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
+ Ptr) == 0))
+ return;
+ }
+ TSDRegistryT::ThreadTSD.commitBack(Instance);
+ TSDRegistryT::State = ThreadState::TornDown;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_EXCLUSIVE_H_
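
teardownThread() above relies on a classic pthread trick: a key destructor can re-register its value with pthread_setspecific(), which makes the TLS cleanup loop invoke it again on its next pass, so the real teardown can be deferred to the last pass, after other destructors (which may still call free()) have run. A self-contained sketch of just that trick (hypothetical names, not scudo code):

  #include <pthread.h>
  #include <stdint.h>
  #include <stdio.h>

  static pthread_key_t Key;

  static void deferredDtor(void *Ptr) {
    const uintptr_t Remaining = reinterpret_cast<uintptr_t>(Ptr);
    if (Remaining > 1) {
      // Re-register so the TLS cleanup loop calls us again on its next pass;
      // destructors of other keys run in between and may still free().
      if (pthread_setspecific(Key, reinterpret_cast<void *>(Remaining - 1)) ==
          0)
        return;
      // If re-registration fails, fall through and tear down now.
    }
    puts("last pass: safe to drain the quarantine and caches");
  }

  static void *threadMain(void *) {
    pthread_setspecific(Key, reinterpret_cast<void *>(4)); // Iteration budget.
    return nullptr;
  }

  int main() {
    pthread_key_create(&Key, deferredDtor);
    pthread_t T;
    pthread_create(&T, nullptr, threadMain, nullptr);
    pthread_join(T, nullptr);
    return 0;
  }
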
diff --git a/lib/scudo/standalone/tsd_shared.h b/lib/scudo/standalone/tsd_shared.h
new file mode 100644
index 000000000000..0f0a83a3eed4
--- /dev/null
+++ b/lib/scudo/standalone/tsd_shared.h
@@ -0,0 +1,169 @@
+//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_SHARED_H_
+#define SCUDO_TSD_SHARED_H_
+
+#include "linux.h" // for getAndroidTlsPtr()
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initLinkerInitialized();
+ CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
+ NumberOfTSDs = Min(Max(1U, getNumberOfCPUs()), MaxTSDCount);
+ TSDs = reinterpret_cast<TSD<Allocator> *>(
+ map(nullptr, sizeof(TSD<Allocator>) * NumberOfTSDs, "scudo:tsd"));
+ for (u32 I = 0; I < NumberOfTSDs; I++)
+ TSDs[I].initLinkerInitialized(Instance);
+ // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
+ // array of TSDs in a random order. For details, see:
+ // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
+ for (u32 I = 0; I < NumberOfTSDs; I++) {
+ u32 A = I + 1;
+ u32 B = NumberOfTSDs;
+ // Find the GCD of I + 1 and NumberOfTSDs. If it is 1, they are coprime.
+ while (B != 0) {
+ const u32 T = A;
+ A = B;
+ B = T % B;
+ }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
+ Initialized = true;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(TSDs),
+ sizeof(TSD<Allocator>) * NumberOfTSDs);
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
+ UNUSED bool MinimalInit) {
+ if (LIKELY(getCurrentTSD()))
+ return;
+ initThread(Instance);
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ TSD<Allocator> *TSD = getCurrentTSD();
+ DCHECK(TSD);
+ *UnlockRequired = true;
+ // Try to lock the currently associated context.
+ if (TSD->tryLock())
+ return TSD;
+ // If that fails, go down the slow path.
+ return getTSDAndLockSlow(TSD);
+ }
+
+private:
+ ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
+#if SCUDO_ANDROID
+ *getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
+#elif SCUDO_LINUX
+ ThreadTSD = CurrentTSD;
+#else
+ CHECK_EQ(
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(CurrentTSD)),
+ 0);
+#endif
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
+#if SCUDO_ANDROID
+ return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
+#elif SCUDO_LINUX
+ return ThreadTSD;
+#else
+ return reinterpret_cast<TSD<Allocator> *>(pthread_getspecific(PThreadKey));
+#endif
+ }
+
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (Initialized)
+ return;
+ initLinkerInitialized(Instance); // Sets Initialized.
+ }
+
+ NOINLINE void initThread(Allocator *Instance) {
+ initOnceMaybe(Instance);
+ // Initial context assignment is done in a plain round-robin fashion.
+ const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
+ setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+ }
+
+ NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
+ if (MaxTSDCount > 1U && NumberOfTSDs > 1U) {
+ // Use the Precedence of the current TSD as our random seed. Since we are
+ // in the slow path, it means that tryLock failed, and as a result it's
+ // very likely that said Precedence is non-zero.
+ u32 RandState = static_cast<u32>(CurrentTSD->getPrecedence());
+ const u32 R = getRandomU32(&RandState);
+ const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+ u32 Index = R % NumberOfTSDs;
+ uptr LowestPrecedence = UINTPTR_MAX;
+ TSD<Allocator> *CandidateTSD = nullptr;
+ // Go randomly through at most 4 contexts and find a candidate.
+ for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+ if (TSDs[Index].tryLock()) {
+ setCurrentTSD(&TSDs[Index]);
+ return &TSDs[Index];
+ }
+ const uptr Precedence = TSDs[Index].getPrecedence();
+ // A 0 precedence here means another thread just locked this TSD.
+ if (Precedence && Precedence < LowestPrecedence) {
+ CandidateTSD = &TSDs[Index];
+ LowestPrecedence = Precedence;
+ }
+ Index += Inc;
+ if (Index >= NumberOfTSDs)
+ Index -= NumberOfTSDs;
+ }
+ if (CandidateTSD) {
+ CandidateTSD->lock();
+ setCurrentTSD(CandidateTSD);
+ return CandidateTSD;
+ }
+ }
+ // Last resort, stick with the current one.
+ CurrentTSD->lock();
+ return CurrentTSD;
+ }
+
+ pthread_key_t PThreadKey;
+ atomic_u32 CurrentIndex;
+ u32 NumberOfTSDs;
+ TSD<Allocator> *TSDs;
+ u32 NumberOfCoPrimes;
+ u32 CoPrimes[MaxTSDCount];
+ bool Initialized;
+ HybridMutex Mutex;
+#if SCUDO_LINUX && !SCUDO_ANDROID
+ static THREADLOCAL TSD<Allocator> *ThreadTSD;
+#endif
+};
+
+#if SCUDO_LINUX && !SCUDO_ANDROID
+template <class Allocator, u32 MaxTSDCount>
+THREADLOCAL TSD<Allocator>
+ *TSDRegistrySharedT<Allocator, MaxTSDCount>::ThreadTSD;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_SHARED_H_
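
getTSDAndLockSlow() above walks the TSD array with a stride drawn from CoPrimes: stepping by an increment coprime with NumberOfTSDs, and wrapping by subtraction, visits every index exactly once before repeating (the property the Lemire post cited above relies on). A tiny standalone demonstration of that property (illustrative; needs C++17 for std::gcd):

  #include <cstdio>
  #include <numeric>

  int main() {
    const unsigned N = 6;   // Stand-in for NumberOfTSDs.
    const unsigned Inc = 5; // A valid stride: gcd(5, 6) == 1.
    if (std::gcd(Inc, N) != 1)
      return 1;
    unsigned Index = 2;     // Arbitrary random starting point.
    for (unsigned I = 0; I < N; I++) {
      std::printf("%u ", Index); // Prints each of 0..5 exactly once.
      Index += Inc;
      if (Index >= N)            // Same cheap wrap-around as the code above.
        Index -= N;
    }
    std::printf("\n");
    return 0;
  }
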
diff --git a/lib/scudo/standalone/vector.h b/lib/scudo/standalone/vector.h
new file mode 100644
index 000000000000..3cb4005ed29c
--- /dev/null
+++ b/lib/scudo/standalone/vector.h
@@ -0,0 +1,118 @@
+//===-- vector.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_VECTOR_H_
+#define SCUDO_VECTOR_H_
+
+#include "common.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// A low-level vector based on map(). May incur a significant memory overhead
+// for small vectors. The current implementation supports only POD types.
+template <typename T> class VectorNoCtor {
+public:
+ void init(uptr InitialCapacity) {
+ CapacityBytes = 0;
+ Size = 0;
+ Data = nullptr;
+ reserve(InitialCapacity);
+ }
+ void destroy() {
+ if (Data)
+ unmap(Data, CapacityBytes);
+ }
+ T &operator[](uptr I) {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ const T &operator[](uptr I) const {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ void push_back(const T &Element) {
+ DCHECK_LE(Size, capacity());
+ if (Size == capacity()) {
+ const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+ reallocate(NewCapacity);
+ }
+ memcpy(&Data[Size++], &Element, sizeof(T));
+ }
+ T &back() {
+ DCHECK_GT(Size, 0);
+ return Data[Size - 1];
+ }
+ void pop_back() {
+ DCHECK_GT(Size, 0);
+ Size--;
+ }
+ uptr size() const { return Size; }
+ const T *data() const { return Data; }
+ T *data() { return Data; }
+ uptr capacity() const { return CapacityBytes / sizeof(T); }
+ void reserve(uptr NewSize) {
+ // Never downsize the internal buffer.
+ if (NewSize > capacity())
+ reallocate(NewSize);
+ }
+ void resize(uptr NewSize) {
+ if (NewSize > Size) {
+ reserve(NewSize);
+ memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
+ }
+ Size = NewSize;
+ }
+
+ void clear() { Size = 0; }
+ bool empty() const { return size() == 0; }
+
+ const T *begin() const { return data(); }
+ T *begin() { return data(); }
+ const T *end() const { return data() + size(); }
+ T *end() { return data() + size(); }
+
+private:
+ void reallocate(uptr NewCapacity) {
+ DCHECK_GT(NewCapacity, 0);
+ DCHECK_LE(Size, NewCapacity);
+ const uptr NewCapacityBytes =
+ roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+ T *NewData = (T *)map(nullptr, NewCapacityBytes, "scudo:vector");
+ if (Data) {
+ memcpy(NewData, Data, Size * sizeof(T));
+ unmap(Data, CapacityBytes);
+ }
+ Data = NewData;
+ CapacityBytes = NewCapacityBytes;
+ }
+
+ T *Data;
+ uptr CapacityBytes;
+ uptr Size;
+};
+
+template <typename T> class Vector : public VectorNoCtor<T> {
+public:
+ Vector() { VectorNoCtor<T>::init(1); }
+ explicit Vector(uptr Count) {
+ VectorNoCtor<T>::init(Count);
+ this->resize(Count);
+ }
+ ~Vector() { VectorNoCtor<T>::destroy(); }
+ // Disallow copies and moves.
+ Vector(const Vector &) = delete;
+ Vector &operator=(const Vector &) = delete;
+ Vector(Vector &&) = delete;
+ Vector &operator=(Vector &&) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_VECTOR_H_
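
Because reallocate() rounds the requested capacity up to a whole page, even a one-element Vector owns at least a page of mapped memory, which is the overhead the class comment warns about. A short usage sketch against the API above (illustrative only; Vector is an internal scudo type):

  #include "vector.h"

  void vectorExample() {
    scudo::Vector<scudo::u32> V; // Requests capacity 1, but maps a full page,
                                 // so capacity() is already PageSize / 4.
    for (scudo::u32 I = 0; I < 100; I++)
      V.push_back(I);            // Fits in the initial page; no reallocation.
    DCHECK_EQ(V.size(), 100U);
    DCHECK_EQ(V[99], 99U);
    V.clear();                   // Size drops to 0; the mapping is retained.
  } // ~Vector() unmaps the storage here.
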
diff --git a/lib/scudo/standalone/wrappers_c.cc b/lib/scudo/standalone/wrappers_c.cc
new file mode 100644
index 000000000000..5908c600be33
--- /dev/null
+++ b/lib/scudo/standalone/wrappers_c.cc
@@ -0,0 +1,39 @@
+//===-- wrappers_c.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::Config> Allocator;
+// Pointer to the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separate heap for C & C++, but in
+// reality the amount of cross-pollination between the two is staggering.
+scudo::Allocator<scudo::Config> *AllocatorPtr = &Allocator;
+
+extern "C" {
+
+#define SCUDO_PREFIX(name) name
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+} // extern "C"
+
+#endif // !SCUDO_ANDROID || !_BIONIC
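
The SCUDO_PREFIX/SCUDO_ALLOCATOR pair turns wrappers_c.inc (shown later in this diff) into a template: with the identity prefix used here, each definition expands to the unprefixed libc symbol bound to the static Allocator, while the Bionic build generates scudo_- and scudo_svelte_-prefixed sets from the same include. For instance, the malloc wrapper in wrappers_c.inc expands, roughly, to:

  INTERFACE WEAK void *malloc(size_t size) {
    return scudo::setErrnoOnNull(Allocator.allocate(
        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
  }
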
diff --git a/lib/scudo/standalone/wrappers_c.h b/lib/scudo/standalone/wrappers_c.h
new file mode 100644
index 000000000000..33a0c53cec03
--- /dev/null
+++ b/lib/scudo/standalone/wrappers_c.h
@@ -0,0 +1,52 @@
+//===-- wrappers_c.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_WRAPPERS_C_H_
+#define SCUDO_WRAPPERS_C_H_
+
+#include "platform.h"
+#include "stats.h"
+
+// Bionic's struct mallinfo consists of size_t fields (mallinfo(3) uses int).
+#if SCUDO_ANDROID
+typedef size_t __scudo_mallinfo_data_t;
+#else
+typedef int __scudo_mallinfo_data_t;
+#endif
+
+struct __scudo_mallinfo {
+ __scudo_mallinfo_data_t arena;
+ __scudo_mallinfo_data_t ordblks;
+ __scudo_mallinfo_data_t smblks;
+ __scudo_mallinfo_data_t hblks;
+ __scudo_mallinfo_data_t hblkhd;
+ __scudo_mallinfo_data_t usmblks;
+ __scudo_mallinfo_data_t fsmblks;
+ __scudo_mallinfo_data_t uordblks;
+ __scudo_mallinfo_data_t fordblks;
+ __scudo_mallinfo_data_t keepcost;
+};
+
+// Android sometimes includes malloc.h no matter what, which leads to
+// conflicting return types for mallinfo() if we use our own structure. So if
+// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
+#if STRUCT_MALLINFO_DECLARED
+#define SCUDO_MALLINFO mallinfo
+#else
+#define SCUDO_MALLINFO __scudo_mallinfo
+#endif
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+#endif // SCUDO_WRAPPERS_C_H_
diff --git a/lib/scudo/standalone/wrappers_c.inc b/lib/scudo/standalone/wrappers_c.inc
new file mode 100644
index 000000000000..2beddc724800
--- /dev/null
+++ b/lib/scudo/standalone/wrappers_c.inc
@@ -0,0 +1,176 @@
+//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PREFIX
+#error "Define SCUDO_PREFIX prior to including this file!"
+#endif
+
+// malloc-type functions have to return memory aligned to std::max_align_t.
+// This is distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type
+// functions do not have to abide by the same requirement.
+#ifndef SCUDO_MALLOC_ALIGNMENT
+#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
+#endif
+
+INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
+ scudo::uptr Product;
+ if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportCallocOverflow(nmemb, size);
+ }
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+}
+
+INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
+ struct SCUDO_MALLINFO Info = {};
+ scudo::StatCounters Stats;
+ SCUDO_ALLOCATOR.getStats(Stats);
+ Info.uordblks =
+ static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
+ return Info;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+}
+
+#if SCUDO_ANDROID
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
+#else
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
+#endif
+ return SCUDO_ALLOCATOR.getUsableSize(ptr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
+ // Android rounds up the alignment to a power of two if it isn't one.
+ if (SCUDO_ANDROID) {
+ if (UNLIKELY(!alignment)) {
+ alignment = 1U;
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
+ alignment = scudo::roundUpToPowerOfTwo(alignment);
+ }
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportAlignmentNotPowerOfTwo(alignment);
+ }
+ }
+ return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+ alignment);
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
+ if (!SCUDO_ALLOCATOR.canReturnNull())
+ scudo::reportInvalidPosixMemalignAlignment(alignment);
+ return EINVAL;
+ }
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+ if (UNLIKELY(!Ptr))
+ return ENOMEM;
+ *memptr = Ptr;
+ return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportPvallocOverflow(size);
+ }
+ // pvalloc(0) should allocate one page.
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size ? scudo::roundUpTo(size, PageSize) : PageSize,
+ scudo::Chunk::Origin::Memalign, PageSize));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
+ if (!ptr)
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+ if (size == 0) {
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+ return nullptr;
+ }
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
+}
+
+// Bionic wants a function named PREFIX_iterate and not PREFIX_malloc_iterate,
+// which is somewhat inconsistent with the rest; work around that.
+#if SCUDO_ANDROID && _BIONIC
+#define SCUDO_ITERATE iterate
+#else
+#define SCUDO_ITERATE malloc_iterate
+#endif
+
+INTERFACE WEAK int SCUDO_PREFIX(SCUDO_ITERATE)(
+ uintptr_t base, size_t size,
+ void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
+ SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
+ return 0;
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
+ SCUDO_ALLOCATOR.disable();
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
+ if (param == M_DECAY_TIME) {
+ // TODO(kostyak): set release_to_os_interval_ms accordingly.
+ return 1;
+ } else if (param == M_PURGE) {
+ SCUDO_ALLOCATOR.releaseToOS();
+ return 1;
+ }
+ return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportInvalidAlignedAllocAlignment(alignment, size);
+ }
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(int, FILE *) {
+ errno = ENOTSUP;
+ return -1;
+}
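
The wrappers above pin down the historical edge cases of the C API: realloc(nullptr, n) degenerates to malloc(n), realloc(p, 0) frees p and returns nullptr, and mallopt(M_PURGE, 0) forces memory to be released back to the OS (M_PURGE comes from wrappers_c.h above when the platform's malloc.h lacks it). A hedged usage sketch:

  #include <malloc.h>
  #include <stdlib.h>

  void wrapperSemantics() {
    void *P = realloc(nullptr, 64); // Behaves as malloc(64).
    P = realloc(P, 128);            // Grows the chunk, preserving contents.
    P = realloc(P, 0);              // Frees the chunk and returns nullptr.
    mallopt(M_PURGE, 0);            // Triggers releaseToOS(); returns 1.
  }
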
diff --git a/lib/scudo/standalone/wrappers_c_bionic.cc b/lib/scudo/standalone/wrappers_c_bionic.cc
new file mode 100644
index 000000000000..f6e863deb973
--- /dev/null
+++ b/lib/scudo/standalone/wrappers_c_bionic.cc
@@ -0,0 +1,49 @@
+//===-- wrappers_c_bionic.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// This is only used when compiled as part of Bionic.
+#if SCUDO_ANDROID && _BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::AndroidConfig> Allocator;
+static scudo::Allocator<scudo::AndroidSvelteConfig> SvelteAllocator;
+
+extern "C" {
+
+// Regular MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// Svelte MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
+#define SCUDO_ALLOCATOR SvelteAllocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// The following is the only function that will end up initializing both
+// allocators, which will result in a slight increase in memory footprint.
+INTERFACE void __scudo_print_stats(void) {
+ Allocator.printStats();
+ SvelteAllocator.printStats();
+}
+
+} // extern "C"
+
+#endif // SCUDO_ANDROID && _BIONIC
diff --git a/lib/scudo/standalone/wrappers_c_checks.h b/lib/scudo/standalone/wrappers_c_checks.h
new file mode 100644
index 000000000000..d4370d506e5e
--- /dev/null
+++ b/lib/scudo/standalone/wrappers_c_checks.h
@@ -0,0 +1,67 @@
+//===-- wrappers_c_checks.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKS_H_
+#define SCUDO_CHECKS_H_
+
+#include "common.h"
+
+#include <errno.h>
+
+#ifndef __has_builtin
+#define __has_builtin(X) 0
+#endif
+
+namespace scudo {
+
+// Common errno-setting logic shared by almost all Scudo C wrappers.
+INLINE void *setErrnoOnNull(void *Ptr) {
+ if (UNLIKELY(!Ptr))
+ errno = ENOMEM;
+ return Ptr;
+}
+
+// Checks return true on failure.
+
+// Checks aligned_alloc() parameters, verifies that the alignment is a power of
+// two and that the size is a multiple of alignment.
+INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+ return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+ !isAligned(Size, Alignment);
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+ return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+ !isAligned(Alignment, sizeof(void *));
+}
+
+// Returns true if calloc(Size, N) overflows on Size*N calculation. Use a
+// builtin supported by recent clang & GCC if it exists; otherwise fall back
+// to a costly division.
+INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+#if __has_builtin(__builtin_umull_overflow)
+ return __builtin_umull_overflow(Size, N, Product);
+#else
+ *Product = Size * N;
+ if (!Size)
+ return false;
+ return (*Product / Size) != N;
+#endif
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of PageSize.
+INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+ return roundUpTo(Size, PageSize) < Size;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKS_H_
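
A quick worked example of the fallback path in checkForCallocOverflow() on a 64-bit target: with Size = 2^33 and N = 2^32, the product 2^65 wraps modulo 2^64 to 0, and 0 / 2^33 = 0 != 2^32, so the check correctly returns true; the builtin path reports the same result without the division. Illustrative use (assuming these headers and a 64-bit uptr):

  #include "wrappers_c_checks.h"

  bool demoOverflow() {
    scudo::uptr Product;
    // 2^33 * 2^32 = 2^65 wraps to 0 in 64 bits: flagged as an overflow.
    return scudo::checkForCallocOverflow(scudo::uptr(1) << 33,
                                         scudo::uptr(1) << 32, &Product);
  }
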
diff --git a/lib/scudo/standalone/wrappers_cpp.cc b/lib/scudo/standalone/wrappers_cpp.cc
new file mode 100644
index 000000000000..3ae1cdc05a06
--- /dev/null
+++ b/lib/scudo/standalone/wrappers_cpp.cc
@@ -0,0 +1,107 @@
+//===-- wrappers_cpp.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+
+#include <stdint.h>
+
+extern scudo::Allocator<scudo::Config> *AllocatorPtr;
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+INTERFACE WEAK void *operator new(size_t size) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+}
+
+INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
+ static_cast<scudo::uptr>(align));
+}
+
+#endif // !SCUDO_ANDROID || !_BIONIC
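
With the overloads above in place, over-aligned C++ types route their alignment through to the allocator automatically. A small sketch (assuming a typical 64-bit target where the default operator new alignment is 16):

  struct alignas(64) CacheAligned {
    char Data[64];
  };

  void alignedNewExample() {
    // alignof exceeds the default new alignment, so the compiler selects the
    // operator new(size, std::align_val_t) overload defined above.
    CacheAligned *C = new CacheAligned;
    // Selects the sized + aligned operator delete overload.
    delete C;
  }
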