Importing rustc-1.38.0

Bug: 146571186
Change-Id: Idd23b6282b5f216d22a897103cac97284f84b416
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/src/llvm-project/compiler-rt/lib/scudo/standalone/CMakeLists.txt
new file mode 100644
index 0000000..027b041
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -0,0 +1,133 @@
+add_compiler_rt_component(scudo_standalone)
+
+include_directories(../..)
+
+set(SCUDO_CFLAGS)
+
+list(APPEND SCUDO_CFLAGS
+  -Wall
+  -nostdinc++)
+
+# Remove -stdlib= which is unused when passing -nostdinc++.
+string(REGEX REPLACE "-stdlib=[a-zA-Z+]*" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+
+append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding SCUDO_CFLAGS)
+
+append_list_if(COMPILER_RT_HAS_FVISIBILITY_HIDDEN_FLAG -fvisibility=hidden SCUDO_CFLAGS)
+
+if(COMPILER_RT_DEBUG)
+  list(APPEND SCUDO_CFLAGS -O0)
+else()
+  list(APPEND SCUDO_CFLAGS -O3)
+endif()
+
+set(SCUDO_LINK_FLAGS)
+
+list(APPEND SCUDO_LINK_FLAGS -Wl,-z,defs,-z,now,-z,relro)
+
+append_list_if(COMPILER_RT_HAS_NODEFAULTLIBS_FLAG -nodefaultlibs SCUDO_LINK_FLAGS)
+
+if(ANDROID)
+  # Put the shared library in the global group. For more details, see
+  # android-changes-for-ndk-developers.md#changes-to-library-search-order
+  append_list_if(COMPILER_RT_HAS_Z_GLOBAL -Wl,-z,global SCUDO_LINK_FLAGS)
+endif()
+
+set(SCUDO_HEADERS
+  allocator_config.h
+  atomic_helpers.h
+  bytemap.h
+  checksum.h
+  chunk.h
+  combined.h
+  flags.h
+  flags_parser.h
+  fuchsia.h
+  interface.h
+  internal_defs.h
+  linux.h
+  list.h
+  local_cache.h
+  mutex.h
+  platform.h
+  primary32.h
+  primary64.h
+  quarantine.h
+  release.h
+  report.h
+  secondary.h
+  size_class_map.h
+  stats.h
+  string_utils.h
+  tsd.h
+  tsd_exclusive.h
+  tsd_shared.h
+  vector.h
+  wrappers_c_checks.h
+  wrappers_c.h)
+
+set(SCUDO_SOURCES
+  checksum.cc
+  crc32_hw.cc
+  common.cc
+  flags.cc
+  flags_parser.cc
+  fuchsia.cc
+  linux.cc
+  report.cc
+  secondary.cc
+  string_utils.cc)
+
+# Enable the SSE 4.2 instruction set for crc32_hw.cc, if available.
+if (COMPILER_RT_HAS_MSSE4_2_FLAG)
+  set_source_files_properties(crc32_hw.cc PROPERTIES COMPILE_FLAGS -msse4.2)
+endif()
+
+# Enable the AArch64 CRC32 feature for crc32_hw.cc, if available.
+# Note that it is enabled by default starting with armv8.1-a.
+if (COMPILER_RT_HAS_MCRC_FLAG)
+  set_source_files_properties(crc32_hw.cc PROPERTIES COMPILE_FLAGS -mcrc)
+endif()
+
+set(SCUDO_SOURCES_C_WRAPPERS
+  wrappers_c.cc)
+
+set(SCUDO_SOURCES_CXX_WRAPPERS
+  wrappers_cpp.cc)
+
+if(COMPILER_RT_HAS_SCUDO_STANDALONE)
+  add_compiler_rt_object_libraries(RTScudoStandalone
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS})
+  add_compiler_rt_object_libraries(RTScudoStandaloneCWrappers
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES_C_WRAPPERS}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS})
+  add_compiler_rt_object_libraries(RTScudoStandaloneCxxWrappers
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS})
+
+  add_compiler_rt_runtime(clang_rt.scudo_standalone
+    STATIC
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES} ${SCUDO_SOURCES_C_WRAPPERS}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS}
+    PARENT_TARGET scudo_standalone)
+  add_compiler_rt_runtime(clang_rt.scudo_standalone_cxx
+    STATIC
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS}
+    PARENT_TARGET scudo_standalone)
+
+  if(COMPILER_RT_INCLUDE_TESTS)
+    add_subdirectory(tests)
+  endif()
+endif()
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
new file mode 100644
index 0000000..06ec4f3
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -0,0 +1,80 @@
+//===-- allocator_config.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_H_
+#define SCUDO_ALLOCATOR_CONFIG_H_
+
+#include "combined.h"
+#include "common.h"
+#include "flags.h"
+#include "primary32.h"
+#include "primary64.h"
+#include "size_class_map.h"
+#include "tsd_exclusive.h"
+#include "tsd_shared.h"
+
+namespace scudo {
+
+// Default configurations for various platforms.
+
+struct DefaultConfig {
+  using SizeClassMap = DefaultSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+  // 1GB Regions
+  typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+  // 512KB regions
+  typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+  template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+};
+
+struct AndroidConfig {
+  using SizeClassMap = AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+  // 1GB regions
+  typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+  // 512KB regions
+  typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
+};
+
+struct AndroidSvelteConfig {
+  using SizeClassMap = SvelteSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+  // 512MB regions
+  typedef SizeClassAllocator64<SizeClassMap, 29U> Primary;
+#else
+  // 256KB regions
+  typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+#endif
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
+};
+
+struct FuchsiaConfig {
+  // 1GB Regions
+  typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
+};
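+
+// A custom configuration follows the same shape. As an illustrative sketch
+// only (the exact parameters below are assumptions, not part of this file):
+//   struct MyConfig {
+//     using SizeClassMap = DefaultSizeClassMap;
+//     // 256MB regions.
+//     typedef SizeClassAllocator64<SizeClassMap, 28U> Primary;
+//     template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 4U>;
+//   };
+// which would then be used to instantiate scudo::Allocator<MyConfig>.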
+
+#if SCUDO_ANDROID
+typedef AndroidConfig Config;
+#elif SCUDO_FUCHSIA
+typedef FuchsiaConfig Config;
+#else
+typedef DefaultConfig Config;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
new file mode 100644
index 0000000..47037d7
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -0,0 +1,139 @@
+//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ATOMIC_H_
+#define SCUDO_ATOMIC_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+enum memory_order {
+  memory_order_relaxed = 0,
+  memory_order_consume = 1,
+  memory_order_acquire = 2,
+  memory_order_release = 3,
+  memory_order_acq_rel = 4,
+  memory_order_seq_cst = 5
+};
+COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
+COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
+COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
+COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
+COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
+COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+
+struct atomic_u8 {
+  typedef u8 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_u16 {
+  typedef u16 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_s32 {
+  typedef s32 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_u32 {
+  typedef u32 Type;
+  volatile Type ValDoNotUse;
+};
+
+struct atomic_u64 {
+  typedef u64 Type;
+  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+  ALIGNED(8) volatile Type ValDoNotUse;
+};
+
+struct atomic_uptr {
+  typedef uptr Type;
+  volatile Type ValDoNotUse;
+};
+
+template <typename T>
+INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  typename T::Type V;
+  __atomic_load(&A->ValDoNotUse, &V, MO);
+  return V;
+}
+
+template <typename T>
+INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  __atomic_store(&A->ValDoNotUse, &V, MO);
+}
+
+INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+                                         memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+                                         memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+                                        memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  typename T::Type R;
+  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+  return R;
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+                                           typename T::Type Xchg,
+                                           memory_order MO) {
+  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+                                   __ATOMIC_RELAXED);
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+                                         typename T::Type Xchg,
+                                         memory_order MO) {
+  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
+                                   __ATOMIC_RELAXED);
+}
+
+// Clutter-reducing helpers.
+
+template <typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+  return atomic_load(A, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+  atomic_store(A, V, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+                                                typename T::Type Cmp,
+                                                typename T::Type Xchg) {
+  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+  return Cmp;
+}
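+
+// Usage note (added for clarity): the helper above returns the value observed
+// in A, so a caller can detect success by comparing the return value against
+// the expected one, e.g. (names are illustrative):
+//   if (atomic_compare_exchange(&Word, Expected, Desired) == Expected) {
+//     // The exchange took place.
+//   }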
+
+} // namespace scudo
+
+#endif // SCUDO_ATOMIC_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/bytemap.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/bytemap.h
new file mode 100644
index 0000000..caeeb2f
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/bytemap.h
@@ -0,0 +1,111 @@
+//===-- bytemap.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_BYTEMAP_H_
+#define SCUDO_BYTEMAP_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+namespace scudo {
+
+template <uptr Size> class FlatByteMap {
+public:
+  void initLinkerInitialized() {
+    Map = reinterpret_cast<u8 *>(map(nullptr, Size, "scudo:bytemap"));
+  }
+  void init() { initLinkerInitialized(); }
+
+  void unmapTestOnly() { unmap(reinterpret_cast<void *>(Map), Size); }
+
+  void set(uptr Index, u8 Value) {
+    DCHECK_LT(Index, Size);
+    DCHECK_EQ(0U, Map[Index]);
+    Map[Index] = Value;
+  }
+  u8 operator[](uptr Index) {
+    DCHECK_LT(Index, Size);
+    return Map[Index];
+  }
+
+private:
+  u8 *Map;
+};
+
+template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap {
+public:
+  void initLinkerInitialized() {
+    Level1Map = reinterpret_cast<atomic_uptr *>(
+        map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
+  }
+  void init() {
+    Mutex.init();
+    initLinkerInitialized();
+  }
+
+  void reset() {
+    for (uptr I = 0; I < Level1Size; I++) {
+      u8 *P = get(I);
+      if (!P)
+        continue;
+      unmap(P, Level2Size);
+    }
+    memset(Level1Map, 0, sizeof(atomic_uptr) * Level1Size);
+  }
+
+  void unmapTestOnly() {
+    reset();
+    unmap(reinterpret_cast<void *>(Level1Map),
+          sizeof(atomic_uptr) * Level1Size);
+  }
+
+  uptr size() const { return Level1Size * Level2Size; }
+
+  void set(uptr Index, u8 Value) {
+    DCHECK_LT(Index, Level1Size * Level2Size);
+    u8 *Level2Map = getOrCreate(Index / Level2Size);
+    DCHECK_EQ(0U, Level2Map[Index % Level2Size]);
+    Level2Map[Index % Level2Size] = Value;
+  }
+
+  u8 operator[](uptr Index) const {
+    DCHECK_LT(Index, Level1Size * Level2Size);
+    u8 *Level2Map = get(Index / Level2Size);
+    if (!Level2Map)
+      return 0;
+    return Level2Map[Index % Level2Size];
+  }
+
+private:
+  u8 *get(uptr Index) const {
+    DCHECK_LT(Index, Level1Size);
+    return reinterpret_cast<u8 *>(
+        atomic_load(&Level1Map[Index], memory_order_acquire));
+  }
+
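+  // Note on getOrCreate() below (added comment): it implements double-checked
+  // locking. The first get() is a lock-free acquire load; only when the
+  // second-level map is missing do we take the lock, re-check, and publish the
+  // freshly mapped array with a release store, so that readers in get() always
+  // observe fully initialized memory.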
+  u8 *getOrCreate(uptr Index) {
+    u8 *Res = get(Index);
+    if (!Res) {
+      ScopedLock L(Mutex);
+      if (!(Res = get(Index))) {
+        Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
+        atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
+                     memory_order_release);
+      }
+    }
+    return Res;
+  }
+
+  atomic_uptr *Level1Map;
+  HybridMutex Mutex;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_BYTEMAP_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cc
new file mode 100644
index 0000000..0896d5b
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cc
@@ -0,0 +1,70 @@
+//===-- checksum.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+#include "atomic_helpers.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+#include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+#if SCUDO_FUCHSIA
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+#else
+#include <sys/auxv.h>
+#endif
+#endif
+
+namespace scudo {
+
+Checksum HashAlgorithm = {Checksum::BSD};
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+#ifndef bit_SSE4_2
+#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+#endif
+
+bool hasHardwareCRC32() {
+  u32 Eax, Ebx = 0, Ecx = 0, Edx = 0;
+  __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+  const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+                       (Edx == signature_INTEL_edx) &&
+                       (Ecx == signature_INTEL_ecx);
+  const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) &&
+                     (Ecx == signature_AMD_ecx);
+  if (!IsIntel && !IsAMD)
+    return false;
+  __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+  return !!(Ecx & bit_SSE4_2);
+}
+
+#elif defined(__arm__) || defined(__aarch64__)
+#ifndef AT_HWCAP
+#define AT_HWCAP 16
+#endif
+#ifndef HWCAP_CRC32
+#define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms.
+#endif
+
+bool hasHardwareCRC32() {
+#if SCUDO_FUCHSIA
+  u32 HWCap;
+  const zx_status_t Status =
+      zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+  if (Status != ZX_OK)
+    return false;
+  return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32);
+#else
+  return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+#endif // SCUDO_FUCHSIA
+}
+#endif // defined(__x86_64__) || defined(__i386__)
+
+} // namespace scudo
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h
new file mode 100644
index 0000000..092342f
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h
@@ -0,0 +1,54 @@
+//===-- checksum.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKSUM_H_
+#define SCUDO_CHECKSUM_H_
+
+#include "internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -msse4.2
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+
+#ifdef __SSE4_2__
+#include <smmintrin.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+#endif
+#ifdef __ARM_FEATURE_CRC32
+#include <arm_acle.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+#endif
+
+namespace scudo {
+
+enum class Checksum : u8 {
+  BSD = 0,
+  HardwareCRC32 = 1,
+};
+
+// BSD checksum, unlike a software CRC32, doesn't use any array lookup. We save
+// significantly on memory accesses, as well as 1K of CRC32 table, on platforms
+// that do not support hardware CRC32. The checksum itself is 16-bit, which is
+// at odds with CRC32, but enough for our needs.
+INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+  for (u8 I = 0; I < sizeof(Data); I++) {
+    Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
+    Sum = static_cast<u16>(Sum + (Data & 0xff));
+    Data >>= 8;
+  }
+  return Sum;
+}
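+
+// Worked example (added for illustration, assuming a 64-bit uptr): with
+// Sum = 0 and Data = 1, the first iteration rotates 0 (a no-op) and adds the
+// low byte, giving 1; the remaining 7 iterations only rotate the sum right by
+// one bit each, so computeBSDChecksum(0, 1) returns 0x0200.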
+
+bool hasHardwareCRC32();
+WEAK u32 computeHardwareCRC32(u32 Crc, uptr Data);
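+
+// Added note: the allocator (see combined.h) picks the implementation at init
+// time. If a definition of computeHardwareCRC32 was linked in (the symbol is
+// WEAK) and hasHardwareCRC32() reports runtime support, HashAlgorithm is
+// switched to Checksum::HardwareCRC32; otherwise the BSD checksum above is
+// used.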
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKSUM_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
new file mode 100644
index 0000000..76ef661
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
@@ -0,0 +1,156 @@
+//===-- chunk.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHUNK_H_
+#define SCUDO_CHUNK_H_
+
+#include "platform.h"
+
+#include "atomic_helpers.h"
+#include "checksum.h"
+#include "common.h"
+#include "report.h"
+
+namespace scudo {
+
+extern Checksum HashAlgorithm;
+
+INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+  // as opposed to only for crc32_hw.cc. This means that other hardware-specific
+  // instructions were likely emitted at other places, and as a result there is
+  // no reason not to use it here.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+  u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
+  for (uptr I = 0; I < ArraySize; I++)
+    Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
+  return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+#else
+  if (HashAlgorithm == Checksum::HardwareCRC32) {
+    u32 Crc = computeHardwareCRC32(Seed, Value);
+    for (uptr I = 0; I < ArraySize; I++)
+      Crc = computeHardwareCRC32(Crc, Array[I]);
+    return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+  } else {
+    u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed & 0xffff), Value);
+    for (uptr I = 0; I < ArraySize; I++)
+      Checksum = computeBSDChecksum(Checksum, Array[I]);
+    return Checksum;
+  }
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+}
+
+namespace Chunk {
+
+// Note that in an ideal world, `State` and `Origin` would be `enum class`, and
+// the associated `UnpackedHeader` fields would be of their respective enum
+// class type, but https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 prevents
+// it: GCC errors out, complaining that the number of bits is not enough.
+enum Origin : u8 {
+  Malloc = 0,
+  New = 1,
+  NewArray = 2,
+  Memalign = 3,
+};
+
+enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };
+
+typedef u64 PackedHeader;
+// Update the 'Mask' constants to reflect changes in this structure.
+struct UnpackedHeader {
+  u64 Checksum : 16;
+  u64 ClassId : 8;
+  u64 SizeOrUnusedBytes : 20;
+  u8 State : 2;
+  u8 Origin : 2;
+  u64 Offset : 16;
+};
+typedef atomic_u64 AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
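+
+// Added note: the bitfield widths above (16 + 8 + 20 + 2 + 2 + 16) sum to 64
+// bits, which is what allows an UnpackedHeader to be bit_cast to and from the
+// 64-bit PackedHeader stored atomically right before the user pointer of each
+// chunk.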
+
+// Those constants are required to silence some -Werror=conversion errors when
+// assigning values to the related bitfield variables.
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
+constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
+constexpr uptr StateMask = (1UL << 2) - 1;
+constexpr uptr OriginMask = (1UL << 2) - 1;
+constexpr uptr OffsetMask = (1UL << 16) - 1;
+
+constexpr uptr getHeaderSize() {
+  return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+  return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+                                                getHeaderSize());
+}
+
+INLINE
+const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+  return reinterpret_cast<const AtomicPackedHeader *>(
+      reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+}
+
+// We do not need a cryptographically strong hash for the checksum, but a CRC
+// type function that can alert us in the event a header is invalid or
+// corrupted. Ideally slightly better than a simple xor of all fields.
+static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+                                        UnpackedHeader *Header) {
+  UnpackedHeader ZeroChecksumHeader = *Header;
+  ZeroChecksumHeader.Checksum = 0;
+  uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+  memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+  return computeChecksum(Cookie, reinterpret_cast<uptr>(Ptr), HeaderHolder,
+                         ARRAY_SIZE(HeaderHolder));
+}
+
+INLINE void storeHeader(u32 Cookie, void *Ptr,
+                        UnpackedHeader *NewUnpackedHeader) {
+  NewUnpackedHeader->Checksum =
+      computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+  PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+  atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+}
+
+INLINE
+void loadHeader(u32 Cookie, const void *Ptr,
+                UnpackedHeader *NewUnpackedHeader) {
+  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+  if (UNLIKELY(NewUnpackedHeader->Checksum !=
+               computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader)))
+    reportHeaderCorruption(const_cast<void *>(Ptr));
+}
+
+INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+                                  UnpackedHeader *NewUnpackedHeader,
+                                  UnpackedHeader *OldUnpackedHeader) {
+  NewUnpackedHeader->Checksum =
+      computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+  PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+  PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+  if (UNLIKELY(!atomic_compare_exchange_strong(
+          getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
+          memory_order_relaxed)))
+    reportHeaderRace(Ptr);
+}
+
+INLINE
+bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+  PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+  *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+  return NewUnpackedHeader->Checksum ==
+         computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+}
+
+} // namespace Chunk
+
+} // namespace scudo
+
+#endif // SCUDO_CHUNK_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/combined.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
new file mode 100644
index 0000000..4c1c119
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
@@ -0,0 +1,557 @@
+//===-- combined.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMBINED_H_
+#define SCUDO_COMBINED_H_
+
+#include "chunk.h"
+#include "common.h"
+#include "flags.h"
+#include "flags_parser.h"
+#include "interface.h"
+#include "local_cache.h"
+#include "quarantine.h"
+#include "report.h"
+#include "secondary.h"
+#include "tsd.h"
+
+namespace scudo {
+
+template <class Params> class Allocator {
+public:
+  using PrimaryT = typename Params::Primary;
+  using CacheT = typename PrimaryT::CacheT;
+  typedef Allocator<Params> ThisT;
+  typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+
+  struct QuarantineCallback {
+    explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
+        : Allocator(Instance), Cache(LocalCache) {}
+
+    // Chunk recycling function, returns a quarantined chunk to the backend,
+    // first making sure it hasn't been tampered with.
+    void recycle(void *Ptr) {
+      Chunk::UnpackedHeader Header;
+      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+      if (UNLIKELY(Header.State != Chunk::State::Quarantined))
+        reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
+
+      Chunk::UnpackedHeader NewHeader = Header;
+      NewHeader.State = Chunk::State::Available;
+      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+
+      void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
+      const uptr ClassId = Header.ClassId;
+      if (ClassId)
+        Cache.deallocate(ClassId, BlockBegin);
+      else
+        Allocator.Secondary.deallocate(BlockBegin);
+    }
+
+    // We take a shortcut when allocating a quarantine batch by working with the
+    // appropriate class ID instead of using Size. The compiler should optimize
+    // the class ID computation and work with the associated cache directly.
+    void *allocate(UNUSED uptr Size) {
+      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+      void *Ptr = Cache.allocate(QuarantineClassId);
+      // Quarantine batch allocation failure is fatal.
+      if (UNLIKELY(!Ptr))
+        reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
+
+      Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
+                                     Chunk::getHeaderSize());
+      Chunk::UnpackedHeader Header = {};
+      Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
+      Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
+      Header.State = Chunk::State::Allocated;
+      Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+
+      return Ptr;
+    }
+
+    void deallocate(void *Ptr) {
+      const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+          sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+      Chunk::UnpackedHeader Header;
+      Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+
+      if (UNLIKELY(Header.State != Chunk::State::Allocated))
+        reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+      DCHECK_EQ(Header.ClassId, QuarantineClassId);
+      DCHECK_EQ(Header.Offset, 0);
+      DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
+
+      Chunk::UnpackedHeader NewHeader = Header;
+      NewHeader.State = Chunk::State::Available;
+      Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+      Cache.deallocate(QuarantineClassId,
+                       reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+                                                Chunk::getHeaderSize()));
+    }
+
+  private:
+    ThisT &Allocator;
+    CacheT &Cache;
+  };
+
+  typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
+  typedef typename QuarantineT::CacheT QuarantineCacheT;
+
+  void initLinkerInitialized() {
+    performSanityChecks();
+
+    // Check if hardware CRC32 is supported in the binary and by the platform,
+    // if so, opt for the CRC32 hardware version of the checksum.
+    if (&computeHardwareCRC32 && hasHardwareCRC32())
+      HashAlgorithm = Checksum::HardwareCRC32;
+
+    if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
+      Cookie = static_cast<u32>(getMonotonicTime() ^
+                                (reinterpret_cast<uptr>(this) >> 4));
+
+    initFlags();
+    reportUnrecognizedFlags();
+
+    // Store some flags locally.
+    Options.MayReturnNull = getFlags()->may_return_null;
+    Options.ZeroContents = getFlags()->zero_contents;
+    Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
+    Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
+    Options.QuarantineMaxChunkSize = getFlags()->quarantine_max_chunk_size;
+
+    Stats.initLinkerInitialized();
+    Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
+    Secondary.initLinkerInitialized(&Stats);
+
+    Quarantine.init(getFlags()->quarantine_size_kb << 10,
+                    getFlags()->thread_local_quarantine_size_kb << 10);
+  }
+
+  void reset() { memset(this, 0, sizeof(*this)); }
+
+  void unmapTestOnly() {
+    TSDRegistry.unmapTestOnly();
+    Primary.unmapTestOnly();
+  }
+
+  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+
+  void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
+
+  // Release the resources used by a TSD, which involves:
+  // - draining the local quarantine cache to the global quarantine;
+  // - releasing the cached pointers back to the Primary;
+  // - unlinking the local stats from the global ones (destroying the cache does
+  //   the last two items).
+  void commitBack(TSD<ThisT> *TSD) {
+    Quarantine.drain(&TSD->QuarantineCache,
+                     QuarantineCallback(*this, TSD->Cache));
+    TSD->Cache.destroy(&Stats);
+  }
+
+  NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
+                          uptr Alignment = MinAlignment,
+                          bool ZeroContents = false) {
+    initThreadMaybe();
+
+    if (UNLIKELY(Alignment > MaxAlignment)) {
+      if (Options.MayReturnNull)
+        return nullptr;
+      reportAlignmentTooBig(Alignment, MaxAlignment);
+    }
+    if (UNLIKELY(Alignment < MinAlignment))
+      Alignment = MinAlignment;
+
+    // If the requested size happens to be 0 (more common than you might think),
+    // allocate 1 byte on top of the header. Then add the extra bytes required
+    // to fulfill the alignment requirements: we allocate enough to be sure that
+    // there will be an address in the block that will satisfy the alignment.
+    const uptr NeededSize =
+        Chunk::getHeaderSize() + roundUpTo(Size ? Size : 1, MinAlignment) +
+        ((Alignment > MinAlignment) ? (Alignment - Chunk::getHeaderSize()) : 0);
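+    // Worked example (added for illustration, assuming a typical 64-bit
+    // platform where Chunk::getHeaderSize() and MinAlignment are both 16):
+    // Size = 0 with Alignment = 64 yields NeededSize = 16 + 16 + (64 - 16),
+    // i.e. 80 bytes.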
+
+    // Takes care of extravagantly large sizes as well as integer overflows.
+    if (UNLIKELY(Size >= MaxAllowedMallocSize ||
+                 NeededSize >= MaxAllowedMallocSize)) {
+      if (Options.MayReturnNull)
+        return nullptr;
+      reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
+    }
+
+    void *Block;
+    uptr ClassId;
+    uptr BlockEnd = 0;
+    if (PrimaryT::canAllocate(NeededSize)) {
+      ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+      bool UnlockRequired;
+      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      Block = TSD->Cache.allocate(ClassId);
+      if (UnlockRequired)
+        TSD->unlock();
+    } else {
+      ClassId = 0;
+      Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
+    }
+
+    if (UNLIKELY(!Block)) {
+      if (Options.MayReturnNull)
+        return nullptr;
+      reportOutOfMemory(NeededSize);
+    }
+
+    // We only need to zero the contents for Primary backed allocations.
+    if ((ZeroContents || Options.ZeroContents) && ClassId)
+      memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
+
+    Chunk::UnpackedHeader Header = {};
+    uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+    // The following condition isn't necessarily "UNLIKELY".
+    if (!isAligned(UserPtr, Alignment)) {
+      const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
+      const uptr Offset = AlignedUserPtr - UserPtr;
+      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+      DCHECK_GT(Offset, 2 * sizeof(u32));
+      // The BlockMarker has no security purpose, but is specifically meant for
+      // the chunk iteration function that can be used in debugging situations.
+      // It is the only situation where we have to locate the start of a chunk
+      // based on its block address.
+      reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+      reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+      UserPtr = AlignedUserPtr;
+    }
+    Header.State = Chunk::State::Allocated;
+    Header.Origin = Origin & Chunk::OriginMask;
+    if (ClassId) {
+      Header.ClassId = ClassId & Chunk::ClassIdMask;
+      Header.SizeOrUnusedBytes = Size & Chunk::SizeOrUnusedBytesMask;
+    } else {
+      Header.SizeOrUnusedBytes =
+          (BlockEnd - (UserPtr + Size)) & Chunk::SizeOrUnusedBytesMask;
+    }
+    void *Ptr = reinterpret_cast<void *>(UserPtr);
+    Chunk::storeHeader(Cookie, Ptr, &Header);
+
+    if (&__scudo_allocate_hook)
+      __scudo_allocate_hook(Ptr, Size);
+
+    return Ptr;
+  }
+
+  NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
+                           UNUSED uptr Alignment = MinAlignment) {
+    // For a deallocation, we only ensure minimal initialization, meaning thread
+    // local data will be left uninitialized for now (when using ELF TLS). The
+    // fallback cache will be used instead. This is a workaround for a situation
+    // where the only heap operation performed in a thread would be a free past
+    // the TLS destructors, ending up with initialized thread-specific data never
+    // being destroyed properly. Any other heap operation will do a full init.
+    initThreadMaybe(/*MinimalInit=*/true);
+
+    if (&__scudo_deallocate_hook)
+      __scudo_deallocate_hook(Ptr);
+
+    if (UNLIKELY(!Ptr))
+      return;
+    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
+      reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
+
+    Chunk::UnpackedHeader Header;
+    Chunk::loadHeader(Cookie, Ptr, &Header);
+
+    if (UNLIKELY(Header.State != Chunk::State::Allocated))
+      reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+    if (Options.DeallocTypeMismatch) {
+      if (Header.Origin != Origin) {
+        // With the exception of memalign'd chunks, which can still be free'd.
+        if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
+                     Origin != Chunk::Origin::Malloc))
+          reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
+                                    Header.Origin, Origin);
+      }
+    }
+
+    const uptr Size = getSize(Ptr, &Header);
+    if (DeleteSize && Options.DeleteSizeMismatch) {
+      if (UNLIKELY(DeleteSize != Size))
+        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
+    }
+
+    quarantineOrDeallocateChunk(Ptr, &Header, Size);
+  }
+
+  void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
+    initThreadMaybe();
+
+    // The following cases are handled by the C wrappers.
+    DCHECK_NE(OldPtr, nullptr);
+    DCHECK_NE(NewSize, 0);
+
+    if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
+      reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
+
+    Chunk::UnpackedHeader OldHeader;
+    Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
+
+    if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
+      reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
+
+    // Pointer has to be allocated with a malloc-type function. Some
+    // applications think that it is OK to realloc a memalign'ed pointer, which
+    // will trigger this check. It really isn't.
+    if (Options.DeallocTypeMismatch) {
+      if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
+        reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
+                                  OldHeader.Origin, Chunk::Origin::Malloc);
+    }
+
+    const uptr OldSize = getSize(OldPtr, &OldHeader);
+    // If the new size is identical to the old one, or lower but within an
+    // acceptable range, we just keep the old chunk, and update its header.
+    if (NewSize == OldSize)
+      return OldPtr;
+    if (NewSize < OldSize) {
+      const uptr Delta = OldSize - NewSize;
+      if (Delta < (SizeClassMap::MaxSize / 2)) {
+        Chunk::UnpackedHeader NewHeader = OldHeader;
+        NewHeader.SizeOrUnusedBytes =
+            (OldHeader.ClassId ? NewHeader.SizeOrUnusedBytes - Delta
+                               : NewHeader.SizeOrUnusedBytes + Delta) &
+            Chunk::SizeOrUnusedBytesMask;
+        Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
+        return OldPtr;
+      }
+    }
+
+    // Otherwise we allocate a new one, and deallocate the old one. Some
+    // allocators will allocate an even larger chunk (by a fixed factor) to
+    // allow for potential further in-place realloc. The gains of such a trick
+    // are currently unclear.
+    void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
+    if (NewPtr) {
+      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
+      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+    }
+    return NewPtr;
+  }
+
+  // TODO(kostyak): while this locks the Primary & Secondary, it still allows
+  //                pointers to be fetched from the TSD. We ultimately want to
+  //                lock the registry as well. For now, it's good enough.
+  void disable() {
+    initThreadMaybe();
+    Primary.disable();
+    Secondary.disable();
+  }
+
+  void enable() {
+    initThreadMaybe();
+    Secondary.enable();
+    Primary.enable();
+  }
+
+  void printStats() {
+    disable();
+    Primary.printStats();
+    Secondary.printStats();
+    Quarantine.printStats();
+    enable();
+  }
+
+  void releaseToOS() { Primary.releaseToOS(); }
+
+  // Iterate over all chunks and call a callback for all busy chunks located
+  // within the provided memory range. Said callback must not use this allocator
+  // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
+  void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
+                         void *Arg) {
+    initThreadMaybe();
+    const uptr From = Base;
+    const uptr To = Base + Size;
+    auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
+      if (Block < From || Block > To)
+        return;
+      uptr ChunkSize;
+      const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
+      if (ChunkBase != InvalidChunk)
+        Callback(ChunkBase, ChunkSize, Arg);
+    };
+    Primary.iterateOverBlocks(Lambda);
+    Secondary.iterateOverBlocks(Lambda);
+  }
+
+  bool canReturnNull() {
+    initThreadMaybe();
+    return Options.MayReturnNull;
+  }
+
+  // TODO(kostyak): implement this as a "backend" to mallopt.
+  bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+
+  // Return the usable size for a given chunk. Technically we lie, as we just
+  // report the actual size of a chunk. This is done to counteract code actively
+  // writing past the end of a chunk (like sqlite3) when the usable size allows
+  // for it, which then forces realloc to copy the usable size of a chunk as
+  // opposed to its actual size.
+  uptr getUsableSize(const void *Ptr) {
+    initThreadMaybe();
+    if (UNLIKELY(!Ptr))
+      return 0;
+    Chunk::UnpackedHeader Header;
+    Chunk::loadHeader(Cookie, Ptr, &Header);
+    // Getting the usable size of a chunk only makes sense if it's allocated.
+    if (UNLIKELY(Header.State != Chunk::State::Allocated))
+      reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
+    return getSize(Ptr, &Header);
+  }
+
+  void getStats(StatCounters S) {
+    initThreadMaybe();
+    Stats.get(S);
+  }
+
+private:
+  typedef MapAllocator SecondaryT;
+  typedef typename PrimaryT::SizeClassMap SizeClassMap;
+
+  static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
+  static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
+  static const uptr MinAlignment = 1UL << MinAlignmentLog;
+  static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
+  static const uptr MaxAllowedMallocSize =
+      FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
+
+  // Constants used by the chunk iteration mechanism.
+  static const u32 BlockMarker = 0x44554353U;
+  static const uptr InvalidChunk = ~static_cast<uptr>(0);
+
+  GlobalStats Stats;
+  TSDRegistryT TSDRegistry;
+  PrimaryT Primary;
+  SecondaryT Secondary;
+  QuarantineT Quarantine;
+
+  u32 Cookie;
+
+  struct {
+    u8 MayReturnNull : 1;       // may_return_null
+    u8 ZeroContents : 1;        // zero_contents
+    u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
+    u8 DeleteSizeMismatch : 1;  // delete_size_mismatch
+    u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
+  } Options;
+
+  // The following might get optimized out by the compiler.
+  NOINLINE void performSanityChecks() {
+    // Verify that the header offset field can hold the maximum offset. In the
+    // case of the Secondary allocator, it takes care of alignment and the
+    // offset will always be small. In the case of the Primary, the worst case
+    // scenario happens in the last size class, when the backend allocation
+    // would already be aligned on the requested alignment, which would happen
+    // to be the maximum alignment that would fit in that size class. As a
+    // result, the maximum offset will be at most the maximum alignment for the
+    // last size class minus the header size, in multiples of MinAlignment.
+    Chunk::UnpackedHeader Header = {};
+    const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
+                                         SizeClassMap::MaxSize - MinAlignment);
+    const uptr MaxOffset =
+        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+    Header.Offset = MaxOffset & Chunk::OffsetMask;
+    if (UNLIKELY(Header.Offset != MaxOffset))
+      reportSanityCheckError("offset");
+
+    // Verify that we can fit the maximum size or amount of unused bytes in the
+    // header. Given that the Secondary fits the allocation to a page, the worst
+    // case scenario happens in the Primary. It will depend on the second to
+    // last and last class sizes, as well as the dynamic base for the Primary.
+    // The following is an over-approximation that works for our needs.
+    const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
+    Header.SizeOrUnusedBytes =
+        MaxSizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+    if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
+      reportSanityCheckError("size (or unused bytes)");
+
+    const uptr LargestClassId = SizeClassMap::LargestClassId;
+    Header.ClassId = LargestClassId;
+    if (UNLIKELY(Header.ClassId != LargestClassId))
+      reportSanityCheckError("class ID");
+  }
+
+  static INLINE void *getBlockBegin(const void *Ptr,
+                                    Chunk::UnpackedHeader *Header) {
+    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+                                    Chunk::getHeaderSize() -
+                                    (Header->Offset << MinAlignmentLog));
+  }
+
+  // Return the size of a chunk as requested during its allocation.
+  INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+    if (Header->ClassId)
+      return SizeOrUnusedBytes;
+    return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
+           reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
+  }
+
+  ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+    TSDRegistry.initThreadMaybe(this, MinimalInit);
+  }
+
+  void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
+                                   uptr Size) {
+    Chunk::UnpackedHeader NewHeader = *Header;
+    // If the quarantine is disabled, or if the size of the chunk is 0 or
+    // larger than the maximum allowed, we return the chunk directly to the
+    // backend.
+    const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
+                                  (Size > Options.QuarantineMaxChunkSize);
+    if (BypassQuarantine) {
+      NewHeader.State = Chunk::State::Available;
+      Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+      void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
+      const uptr ClassId = NewHeader.ClassId;
+      if (ClassId) {
+        bool UnlockRequired;
+        auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+        TSD->Cache.deallocate(ClassId, BlockBegin);
+        if (UnlockRequired)
+          TSD->unlock();
+      } else {
+        Secondary.deallocate(BlockBegin);
+      }
+    } else {
+      NewHeader.State = Chunk::State::Quarantined;
+      Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+      bool UnlockRequired;
+      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      Quarantine.put(&TSD->QuarantineCache,
+                     QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+      if (UnlockRequired)
+        TSD->unlock();
+    }
+  }
+
+  // This only cares about valid busy chunks. This might change in the future.
+  uptr getChunkFromBlock(uptr Block, uptr *Size) {
+    u32 Offset = 0;
+    if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
+      Offset = reinterpret_cast<u32 *>(Block)[1];
+    const uptr P = Block + Offset + Chunk::getHeaderSize();
+    const void *Ptr = reinterpret_cast<const void *>(P);
+    Chunk::UnpackedHeader Header;
+    if (!Chunk::isValid(Cookie, Ptr, &Header) ||
+        Header.State != Chunk::State::Allocated)
+      return InvalidChunk;
+    if (Size)
+      *Size = getSize(Ptr, &Header);
+    return P;
+  }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_COMBINED_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/common.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/common.cc
new file mode 100644
index 0000000..2a26efb
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/common.cc
@@ -0,0 +1,32 @@
+//===-- common.cc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+#include "atomic_helpers.h"
+
+namespace scudo {
+
+uptr PageSizeCached;
+uptr getPageSize();
+
+uptr getPageSizeSlow() {
+  PageSizeCached = getPageSize();
+  CHECK_NE(PageSizeCached, 0);
+  return PageSizeCached;
+}
+
+// Fatal internal map() or unmap() error (potentially OOM related).
+void NORETURN dieOnMapUnmapError(bool OutOfMemory) {
+  outputRaw("Scudo ERROR: internal map or unmap failure");
+  if (OutOfMemory)
+    outputRaw(" (OOM)");
+  outputRaw("\n");
+  die();
+}
+
+} // namespace scudo
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/common.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/common.h
new file mode 100644
index 0000000..c015d1c
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/common.h
@@ -0,0 +1,176 @@
+//===-- common.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMMON_H_
+#define SCUDO_COMMON_H_
+
+#include "internal_defs.h"
+
+#include "fuchsia.h"
+#include "linux.h"
+
+#include <stddef.h>
+#include <string.h>
+
+namespace scudo {
+
+template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
+  COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+  Dest D;
+  memcpy(&D, &S, sizeof(D));
+  return D;
+}
+
+INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+  return (X + Boundary - 1) & ~(Boundary - 1);
+}
+
+INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+  return X & ~(Boundary - 1);
+}
+
+INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+  return (X & (Alignment - 1)) == 0;
+}
+
+template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
+
+template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }
+
+template <class T> void Swap(T &A, T &B) {
+  T Tmp = A;
+  A = B;
+  B = Tmp;
+}
+
+INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+  DCHECK_NE(X, 0U);
+  return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
+}
+
+INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+  DCHECK(Size);
+  if (isPowerOfTwo(Size))
+    return Size;
+  const uptr Up = getMostSignificantSetBitIndex(Size);
+  DCHECK_LT(Size, (1UL << (Up + 1)));
+  DCHECK_GT(Size, (1UL << Up));
+  return 1UL << (Up + 1);
+}
+
+INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+  DCHECK_NE(X, 0U);
+  return static_cast<uptr>(__builtin_ctzl(X));
+}
+
+INLINE uptr getLog2(uptr X) {
+  DCHECK(isPowerOfTwo(X));
+  return getLeastSignificantSetBitIndex(X);
+}
+
+INLINE u32 getRandomU32(u32 *State) {
+  // ANSI C linear congruential PRNG (16-bit output).
+  // return (*State = *State * 1103515245 + 12345) >> 16;
+  // XorShift (32-bit output).
+  *State ^= *State << 13;
+  *State ^= *State >> 17;
+  *State ^= *State << 5;
+  return *State;
+}
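+
+// Added note: this is Marsaglia's 32-bit xorshift generator. The state must be
+// seeded with a non-zero value, as a zero state maps to itself and would yield
+// a constant stream of zeroes.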
+
+INLINE u32 getRandomModN(u32 *State, u32 N) {
+  return getRandomU32(State) % N; // [0, N)
+}
+
+template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+  if (N <= 1)
+    return;
+  u32 State = *RandState;
+  for (u32 I = N - 1; I > 0; I--)
+    Swap(A[I], A[getRandomModN(&State, I + 1)]);
+  *RandState = State;
+}
+
+// Hardware specific inlinable functions.
+
+INLINE void yieldProcessor(u8 Count) {
+#if defined(__i386__) || defined(__x86_64__)
+  __asm__ __volatile__("" ::: "memory");
+  for (u8 I = 0; I < Count; I++)
+    __asm__ __volatile__("pause");
+#elif defined(__aarch64__) || defined(__arm__)
+  __asm__ __volatile__("" ::: "memory");
+  for (u8 I = 0; I < Count; I++)
+    __asm__ __volatile__("yield");
+#endif
+  __asm__ __volatile__("" ::: "memory");
+}
+
+// Platform specific functions.
+
+extern uptr PageSizeCached;
+uptr getPageSizeSlow();
+INLINE uptr getPageSizeCached() {
+  // Bionic uses a hardcoded value.
+  if (SCUDO_ANDROID)
+    return 4096U;
+  if (LIKELY(PageSizeCached))
+    return PageSizeCached;
+  return getPageSizeSlow();
+}
+
+u32 getNumberOfCPUs();
+
+const char *getEnv(const char *Name);
+
+u64 getMonotonicTime();
+
+// Our randomness gathering function is limited to 256 bytes to ensure we get
+// as many bytes as requested, and avoid interruptions (on Linux).
+constexpr uptr MaxRandomLength = 256U;
+bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
+
+// Platform memory mapping functions.
+
+#define MAP_ALLOWNOMEM (1U << 0)
+#define MAP_NOACCESS (1U << 1)
+#define MAP_RESIZABLE (1U << 2)
+
+// Our platform memory mapping use is restricted to 3 scenarios:
+// - reserve memory at a random address (MAP_NOACCESS);
+// - commit memory in a previously reserved space;
+// - commit memory at a random address.
+// As such, only a subset of parameter combinations is valid, which is checked
+// by the function implementation. The Data parameter allows passing opaque
+// platform-specific data to the function.
+// On error, returns nullptr if MAP_ALLOWNOMEM is specified, and dies otherwise.
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
+          MapPlatformData *Data = nullptr);
+
+// Indicates that we are getting rid of the whole mapping, which might have
+// further consequences on Data, depending on the platform.
+#define UNMAP_ALL (1U << 0)
+
+void unmap(void *Addr, uptr Size, uptr Flags = 0,
+           MapPlatformData *Data = nullptr);
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+                      MapPlatformData *Data = nullptr);
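+
+// A rough usage sketch (added for illustration; names and sizes are
+// hypothetical, the real call sites live in the Primary and Secondary):
+//   MapPlatformData Data = {};
+//   // Reserve an address range without accessing it.
+//   void *Reserved = map(nullptr, ReservedSize, "scudo:example", MAP_NOACCESS,
+//                        &Data);
+//   // Commit a chunk of the reserved range.
+//   map(Reserved, CommittedSize, "scudo:example", 0, &Data);
+//   ...
+//   unmap(Reserved, ReservedSize, UNMAP_ALL, &Data);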
+
+// Internal map & unmap fatal error. This must not call map().
+void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
+
+// Logging related functions.
+
+void setAbortMessage(const char *Message);
+
+} // namespace scudo
+
+#endif // SCUDO_COMMON_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cc
new file mode 100644
index 0000000..f4dae7b
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cc
@@ -0,0 +1,19 @@
+//===-- crc32_hw.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+
+namespace scudo {
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+  return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
+}
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+} // namespace scudo
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.cc
new file mode 100644
index 0000000..21144f2
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.cc
@@ -0,0 +1,57 @@
+//===-- flags.cc ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags.h"
+#include "common.h"
+#include "flags_parser.h"
+#include "interface.h"
+
+namespace scudo {
+
+Flags *getFlags() {
+  static Flags F;
+  return &F;
+}
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+void registerFlags(FlagParser *Parser, Flags *F) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description)                      \
+  Parser->registerFlag(#Name, Description, FlagType::FT_##Type,                \
+                       reinterpret_cast<void *>(&F->Name));
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+  return STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+  return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+  return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
+
+void initFlags() {
+  Flags *F = getFlags();
+  F->setDefaults();
+  FlagParser Parser;
+  registerFlags(&Parser, F);
+  Parser.parseString(getCompileDefinitionScudoDefaultOptions());
+  Parser.parseString(getScudoDefaultOptions());
+  Parser.parseString(getEnv("SCUDO_OPTIONS"));
+}
+
+} // namespace scudo
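
initFlags() layers its sources in order: hardcoded defaults, the compile-time
SCUDO_DEFAULT_OPTIONS definition, the weak __scudo_default_options() hook, and
finally the SCUDO_OPTIONS environment variable, with later sources overriding
earlier ones. A minimal sketch of an embedder providing the weak hook (the flag
values are purely illustrative):

// Illustrative only: a strong definition of the weak symbol declared in
// interface.h.
extern "C" const char *__scudo_default_options() {
  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=64:"
         "dealloc_type_mismatch=true";
}
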
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.h
new file mode 100644
index 0000000..edd39a1
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.h
@@ -0,0 +1,30 @@
+//===-- flags.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "flags.inc"
+#undef SCUDO_FLAG
+  void setDefaults();
+};
+
+Flags *getFlags();
+void initFlags();
+class FlagParser;
+void registerFlags(FlagParser *Parser, Flags *F);
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc
new file mode 100644
index 0000000..25b86e1
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc
@@ -0,0 +1,50 @@
+//===-- flags.inc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+#error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, quarantine_size_kb, 0,
+           "Size (in kilobytes) of quarantine used to delay the actual "
+           "deallocation of chunks. Lower value may reduce memory usage but "
+           "decrease the effectiveness of the mitigation.")
+
+SCUDO_FLAG(int, thread_local_quarantine_size_kb, 0,
+           "Size (in kilobytes) of per-thread cache used to offload the global "
+           "quarantine. Lower value may reduce memory usage but might increase "
+           "the contention on the global quarantine.")
+
+SCUDO_FLAG(int, quarantine_max_chunk_size, 0,
+           "Size (in bytes) up to which chunks will be quarantined (if lower "
+           "than or equal to).")
+
+SCUDO_FLAG(bool, dealloc_type_mismatch, false,
+           "Terminate on a type mismatch in allocation-deallocation functions, "
+           "eg: malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, delete_size_mismatch, true,
+           "Terminate on a size mismatch between a sized-delete and the actual "
+           "size of a chunk (as provided to new/new[]).")
+
+SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")
+
+SCUDO_FLAG(int, rss_limit_mb, -1,
+           "Enforce an upper limit (in megabytes) to the process RSS. The "
+           "allocator will terminate or return NULL when allocations are "
+           "attempted past that limit (depending on may_return_null). Negative "
+           "values disable the feature.")
+
+SCUDO_FLAG(bool, may_return_null, true,
+           "Indicate whether the allocator should terminate instead of "
+           "returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
+           "invalid allocation alignments, etc.")
+
+SCUDO_FLAG(int, release_to_os_interval_ms, 5000,
+           "Interval (in milliseconds) at which to attempt release of unused "
+           "memory to the OS. Negative values disable the feature.")
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cc
new file mode 100644
index 0000000..5f1253f
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cc
@@ -0,0 +1,164 @@
+//===-- flags_parser.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags_parser.h"
+#include "common.h"
+#include "report.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+class UnknownFlagsRegistry {
+  static const u32 MaxUnknownFlags = 16;
+  const char *UnknownFlagsNames[MaxUnknownFlags];
+  u32 NumberOfUnknownFlags;
+
+public:
+  void add(const char *Name) {
+    CHECK_LT(NumberOfUnknownFlags, MaxUnknownFlags);
+    UnknownFlagsNames[NumberOfUnknownFlags++] = Name;
+  }
+
+  void report() {
+    if (!NumberOfUnknownFlags)
+      return;
+    Printf("Scudo WARNING: found %d unrecognized flag(s):\n",
+           NumberOfUnknownFlags);
+    for (u32 I = 0; I < NumberOfUnknownFlags; ++I)
+      Printf("    %s\n", UnknownFlagsNames[I]);
+    NumberOfUnknownFlags = 0;
+  }
+};
+static UnknownFlagsRegistry UnknownFlags;
+
+void reportUnrecognizedFlags() { UnknownFlags.report(); }
+
+void FlagParser::printFlagDescriptions() {
+  Printf("Available flags for Scudo:\n");
+  for (u32 I = 0; I < NumberOfFlags; ++I)
+    Printf("\t%s\n\t\t- %s\n", Flags[I].Name, Flags[I].Desc);
+}
+
+static bool isSeparator(char C) {
+  return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
+         C == '\r';
+}
+
+static bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }
+
+void FlagParser::skipWhitespace() {
+  while (isSeparator(Buffer[Pos]))
+    ++Pos;
+}
+
+void FlagParser::parseFlag() {
+  const uptr NameStart = Pos;
+  while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
+    ++Pos;
+  if (Buffer[Pos] != '=')
+    reportError("expected '='");
+  const char *Name = Buffer + NameStart;
+  const uptr ValueStart = ++Pos;
+  const char *Value;
+  if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
+    const char Quote = Buffer[Pos++];
+    while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
+      ++Pos;
+    if (Buffer[Pos] == 0)
+      reportError("unterminated string");
+    Value = Buffer + ValueStart + 1;
+    ++Pos; // consume the closing quote
+  } else {
+    while (!isSeparatorOrNull(Buffer[Pos]))
+      ++Pos;
+    Value = Buffer + ValueStart;
+  }
+  if (!runHandler(Name, Value))
+    reportError("flag parsing failed.");
+}
+
+void FlagParser::parseFlags() {
+  while (true) {
+    skipWhitespace();
+    if (Buffer[Pos] == 0)
+      break;
+    parseFlag();
+  }
+}
+
+void FlagParser::parseString(const char *S) {
+  if (!S)
+    return;
+  // Backup current parser state to allow nested parseString() calls.
+  const char *OldBuffer = Buffer;
+  const uptr OldPos = Pos;
+  Buffer = S;
+  Pos = 0;
+
+  parseFlags();
+
+  Buffer = OldBuffer;
+  Pos = OldPos;
+}
+
+INLINE bool parseBool(const char *Value, bool *b) {
+  if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
+      strncmp(Value, "false", 5) == 0) {
+    *b = false;
+    return true;
+  }
+  if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
+      strncmp(Value, "true", 4) == 0) {
+    *b = true;
+    return true;
+  }
+  return false;
+}
+
+bool FlagParser::runHandler(const char *Name, const char *Value) {
+  for (u32 I = 0; I < NumberOfFlags; ++I) {
+    const uptr Len = strlen(Flags[I].Name);
+    if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != '=')
+      continue;
+    bool Ok = false;
+    switch (Flags[I].Type) {
+    case FlagType::FT_bool:
+      Ok = parseBool(Value, reinterpret_cast<bool *>(Flags[I].Var));
+      if (!Ok)
+        reportInvalidFlag("bool", Value);
+      break;
+    case FlagType::FT_int:
+      char *ValueEnd;
+      *reinterpret_cast<int *>(Flags[I].Var) =
+          static_cast<int>(strtol(Value, &ValueEnd, 10));
+      Ok =
+          *ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
+      if (!Ok)
+        reportInvalidFlag("int", Value);
+      break;
+    }
+    return Ok;
+  }
+  // Unrecognized flag. This is not a fatal error; we may print a warning later.
+  UnknownFlags.add(Name);
+  return true;
+}
+
+void FlagParser::registerFlag(const char *Name, const char *Desc, FlagType Type,
+                              void *Var) {
+  CHECK_LT(NumberOfFlags, MaxFlags);
+  Flags[NumberOfFlags].Name = Name;
+  Flags[NumberOfFlags].Desc = Desc;
+  Flags[NumberOfFlags].Type = Type;
+  Flags[NumberOfFlags].Var = Var;
+  ++NumberOfFlags;
+}
+
+} // namespace scudo
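
A compact sketch of the parser used in isolation, following the same
registerFlags()/parseString() pattern as initFlags() in flags.cc (the variable
names and the option string are illustrative):

// Illustrative only: register two variables, then parse a string using both
// a plain and a quoted value.
static void parseExampleFlags() {
  scudo::FlagParser Parser;
  bool MayReturnNull = false;
  int IntervalMs = 0;
  Parser.registerFlag("may_return_null", "demo flag", scudo::FlagType::FT_bool,
                      &MayReturnNull);
  Parser.registerFlag("release_to_os_interval_ms", "demo flag",
                      scudo::FlagType::FT_int, &IntervalMs);
  Parser.parseString("may_return_null=1,release_to_os_interval_ms='2000'");
  // MayReturnNull is now true and IntervalMs is 2000.
}
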
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h
new file mode 100644
index 0000000..857b50e
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h
@@ -0,0 +1,55 @@
+//===-- flags_parser.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_PARSER_H_
+#define SCUDO_FLAGS_PARSER_H_
+
+#include "report.h"
+#include "string_utils.h"
+
+#include <stddef.h>
+
+namespace scudo {
+
+enum class FlagType : u8 {
+  FT_bool,
+  FT_int,
+};
+
+class FlagParser {
+public:
+  void registerFlag(const char *Name, const char *Desc, FlagType Type,
+                    void *Var);
+  void parseString(const char *S);
+  void printFlagDescriptions();
+
+private:
+  static const u32 MaxFlags = 12;
+  struct Flag {
+    const char *Name;
+    const char *Desc;
+    FlagType Type;
+    void *Var;
+  } Flags[MaxFlags];
+
+  u32 NumberOfFlags = 0;
+  const char *Buffer = nullptr;
+  uptr Pos = 0;
+
+  void reportFatalError(const char *Error);
+  void skipWhitespace();
+  void parseFlags();
+  void parseFlag();
+  bool runHandler(const char *Name, const char *Value);
+};
+
+void reportUnrecognizedFlags();
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_PARSER_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cc
new file mode 100644
index 0000000..896d346
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cc
@@ -0,0 +1,189 @@
+//===-- fuchsia.cc ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#include <limits.h>         // for PAGE_SIZE
+#include <stdlib.h>         // for getenv()
+#include <zircon/compiler.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+uptr getPageSize() { return PAGE_SIZE; }
+
+void NORETURN die() { __builtin_trap(); }
+
+// We zero-initialize the Data parameter of map(); make sure this is consistent
+// with ZX_HANDLE_INVALID.
+COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+
+static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
+  // Only scenario so far.
+  DCHECK(Data);
+  DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);
+
+  const zx_status_t Status = _zx_vmar_allocate(
+      _zx_vmar_root_self(),
+      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+      Size, &Data->Vmar, &Data->VmarBase);
+  if (Status != ZX_OK) {
+    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+    return nullptr;
+  }
+  return reinterpret_cast<void *>(Data->VmarBase);
+}
+
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+          MapPlatformData *Data) {
+  DCHECK_EQ(Size % PAGE_SIZE, 0);
+  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+  // For MAP_NOACCESS, just allocate a Vmar and return.
+  if (Flags & MAP_NOACCESS)
+    return allocateVmar(Size, Data, AllowNoMem);
+
+  const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+  CHECK_NE(Vmar, ZX_HANDLE_INVALID);
+
+  zx_status_t Status;
+  zx_handle_t Vmo;
+  uint64_t VmoSize = 0;
+  if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
+    // If a Vmo was specified, it's a resize operation.
+    CHECK(Addr);
+    DCHECK(Flags & MAP_RESIZABLE);
+    Vmo = Data->Vmo;
+    VmoSize = Data->VmoSize;
+    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+    if (Status != ZX_OK) {
+      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+      return nullptr;
+    }
+  } else {
+    // Otherwise, create a Vmo and set its name.
+    Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
+    if (Status != ZX_OK) {
+      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+      return nullptr;
+    }
+    _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
+  }
+
+  uintptr_t P;
+  zx_vm_option_t MapFlags =
+      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
+  const uint64_t Offset =
+      Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
+  if (Offset)
+    MapFlags |= ZX_VM_SPECIFIC;
+  Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
+  // No need to track the Vmo if we don't intend to resize it. Close it.
+  if (Flags & MAP_RESIZABLE) {
+    DCHECK(Data);
+    DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
+    Data->Vmo = Vmo;
+  } else {
+    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
+  }
+  if (Status != ZX_OK) {
+    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+    return nullptr;
+  }
+  if (Data)
+    Data->VmoSize += Size;
+
+  return reinterpret_cast<void *>(P);
+}
+
+void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
+  if (Flags & UNMAP_ALL) {
+    DCHECK_NE(Data, nullptr);
+    const zx_handle_t Vmar = Data->Vmar;
+    DCHECK_NE(Vmar, _zx_vmar_root_self());
+    // Destroying the vmar effectively unmaps the whole mapping.
+    CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
+    CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
+  } else {
+    const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+    const zx_status_t Status =
+        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
+    if (Status != ZX_OK)
+      dieOnMapUnmapError();
+  }
+  if (Data) {
+    if (Data->Vmo != ZX_HANDLE_INVALID)
+      CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
+    memset(Data, 0, sizeof(*Data));
+  }
+}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
+                      MapPlatformData *Data) {
+  DCHECK(Data);
+  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+  DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
+  const zx_status_t Status =
+      _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
+  CHECK_EQ(Status, ZX_OK);
+}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
+// because the Fuchsia implementation of sync_mutex_t has clang thread safety
+// annotations. Were we to apply proper capability annotations to the top level
+// HybridMutex class itself, they would not be needed. As it stands, the
+// thread analysis thinks that we are locking the mutex and accidentally leaving
+// it locked on the way out.
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  // Size and alignment must be compatible between both types.
+  return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_lock(&M);
+}
+
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_unlock(&M);
+}
+
+u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
+
+u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
+
+bool getRandom(void *Buffer, uptr Length, bool Blocking) {
+  COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+  if (!Buffer || !Length || Length > MaxRandomLength)
+    return false;
+  _zx_cprng_draw(Buffer, Length);
+  return true;
+}
+
+void outputRaw(const char *Buffer) {
+  __sanitizer_log_write(Buffer, strlen(Buffer));
+}
+
+void setAbortMessage(const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h
new file mode 100644
index 0000000..d6993f8
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h
@@ -0,0 +1,31 @@
+//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FUCHSIA_H_
+#define SCUDO_FUCHSIA_H_
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+
+namespace scudo {
+
+struct MapPlatformData {
+  zx_handle_t Vmar;
+  zx_handle_t Vmo;
+  uintptr_t VmarBase;
+  uint64_t VmoSize;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_FUCHSIA_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/interface.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/interface.h
new file mode 100644
index 0000000..e263982
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/interface.h
@@ -0,0 +1,29 @@
+//===-- interface.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_H_
+#define SCUDO_INTERFACE_H_
+
+#include "internal_defs.h"
+
+extern "C" {
+
+WEAK INTERFACE const char *__scudo_default_options();
+
+// Post-allocation & pre-deallocation hooks.
+// They must be thread-safe and not use heap related functions.
+WEAK INTERFACE void __scudo_allocate_hook(void *ptr, size_t size);
+WEAK INTERFACE void __scudo_deallocate_hook(void *ptr);
+
+WEAK INTERFACE void __scudo_print_stats(void);
+
+typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
+
+} // extern "C"
+
+#endif // SCUDO_INTERFACE_H_
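
The hooks above are weak symbols, so an application can provide strong
definitions to observe allocations. They must be thread-safe and avoid heap
operations; a minimal sketch using a plain atomic counter (the counter name is
illustrative):

// Illustrative only.
#include <atomic>
#include <stddef.h>

static std::atomic<size_t> LiveAllocations{0};

extern "C" void __scudo_allocate_hook(void *Ptr, size_t Size) {
  (void)Ptr;
  (void)Size;
  LiveAllocations.fetch_add(1, std::memory_order_relaxed);
}

extern "C" void __scudo_deallocate_hook(void *Ptr) {
  (void)Ptr;
  LiveAllocations.fetch_sub(1, std::memory_order_relaxed);
}
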
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
new file mode 100644
index 0000000..901eac3
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -0,0 +1,135 @@
+//===-- internal_defs.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERNAL_DEFS_H_
+#define SCUDO_INTERNAL_DEFS_H_
+
+#include "platform.h"
+
+#include <stdint.h>
+
+#ifndef SCUDO_DEBUG
+#define SCUDO_DEBUG 0
+#endif
+
+#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
+
+// String related macros.
+
+#define STRINGIFY_(S) #S
+#define STRINGIFY(S) STRINGIFY_(S)
+#define CONCATENATE_(S, C) S##C
+#define CONCATENATE(S, C) CONCATENATE_(S, C)
+
+// Attributes & builtins related macros.
+
+#define INTERFACE __attribute__((visibility("default")))
+#define WEAK __attribute__((weak))
+#define INLINE inline
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define ALIAS(X) __attribute__((alias(X)))
+// Please only use the ALIGNED macro before the type. Using ALIGNED after the
+// variable declaration is not portable.
+#define ALIGNED(X) __attribute__((aligned(X)))
+#define FORMAT(F, A) __attribute__((format(printf, F, A)))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define THREADLOCAL __thread
+#define LIKELY(X) __builtin_expect(!!(X), 1)
+#define UNLIKELY(X) __builtin_expect(!!(X), 0)
+#if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(X) generates prefetcht0 on x86
+#define PREFETCH(X) __asm__("prefetchnta (%0)" : : "r"(X))
+#else
+#define PREFETCH(X) __builtin_prefetch(X)
+#endif
+#define UNUSED __attribute__((unused))
+#define USED __attribute__((used))
+#define NOEXCEPT noexcept
+
+namespace scudo {
+
+typedef unsigned long uptr;
+typedef signed long sptr;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef signed char s8;
+typedef signed short s16;
+typedef signed int s32;
+typedef signed long long s64;
+
+// The following two functions have platform specific implementations.
+void outputRaw(const char *Buffer);
+void NORETURN die();
+
+#define RAW_CHECK_MSG(Expr, Msg)                                               \
+  do {                                                                         \
+    if (UNLIKELY(!(Expr))) {                                                   \
+      outputRaw(Msg);                                                          \
+      die();                                                                   \
+    }                                                                          \
+  } while (false)
+
+#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)
+
+void NORETURN reportCheckFailed(const char *File, int Line,
+                                const char *Condition, u64 Value1, u64 Value2);
+
+#define CHECK_IMPL(C1, Op, C2)                                                 \
+  do {                                                                         \
+    u64 V1 = (u64)(C1);                                                        \
+    u64 V2 = (u64)(C2);                                                        \
+    if (UNLIKELY(!(V1 Op V2))) {                                               \
+      reportCheckFailed(__FILE__, __LINE__, "(" #C1 ") " #Op " (" #C2 ")", V1, \
+                        V2);                                                   \
+      die();                                                                   \
+    }                                                                          \
+  } while (false)
+
+#define CHECK(A) CHECK_IMPL((A), !=, 0)
+#define CHECK_EQ(A, B) CHECK_IMPL((A), ==, (B))
+#define CHECK_NE(A, B) CHECK_IMPL((A), !=, (B))
+#define CHECK_LT(A, B) CHECK_IMPL((A), <, (B))
+#define CHECK_LE(A, B) CHECK_IMPL((A), <=, (B))
+#define CHECK_GT(A, B) CHECK_IMPL((A), >, (B))
+#define CHECK_GE(A, B) CHECK_IMPL((A), >=, (B))
+
+#if SCUDO_DEBUG
+#define DCHECK(A) CHECK(A)
+#define DCHECK_EQ(A, B) CHECK_EQ(A, B)
+#define DCHECK_NE(A, B) CHECK_NE(A, B)
+#define DCHECK_LT(A, B) CHECK_LT(A, B)
+#define DCHECK_LE(A, B) CHECK_LE(A, B)
+#define DCHECK_GT(A, B) CHECK_GT(A, B)
+#define DCHECK_GE(A, B) CHECK_GE(A, B)
+#else
+#define DCHECK(A)
+#define DCHECK_EQ(A, B)
+#define DCHECK_NE(A, B)
+#define DCHECK_LT(A, B)
+#define DCHECK_LE(A, B)
+#define DCHECK_GT(A, B)
+#define DCHECK_GE(A, B)
+#endif
+
+// The superfluous die() call effectively makes this macro NORETURN.
+#define UNREACHABLE(Msg)                                                       \
+  do {                                                                         \
+    CHECK(0 && Msg);                                                           \
+    die();                                                                     \
+  } while (0)
+
+#define COMPILER_CHECK(Pred) static_assert(Pred, "")
+
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+
+} // namespace scudo
+
+#endif // SCUDO_INTERNAL_DEFS_H_
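
A short illustration of the checking macros: the CHECK_* variants always fire,
the DCHECK_* variants compile away unless SCUDO_DEBUG is set, and a failed
check reports the condition with both values before calling die(). The function
below is illustrative:

// Illustrative only.
static scudo::uptr computeOffsetExample(scudo::uptr Base, scudo::uptr Ptr) {
  CHECK_GE(Ptr, Base);      // Verified in all builds.
  DCHECK_EQ(Base % 16, 0U); // Verified only when SCUDO_DEBUG is set.
  return Ptr - Base;
}
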
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/linux.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/linux.cc
new file mode 100644
index 0000000..049477b
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/linux.cc
@@ -0,0 +1,171 @@
+//===-- linux.cc ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "linux.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
+
+void NORETURN die() { abort(); }
+
+void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+          UNUSED MapPlatformData *Data) {
+  int MmapFlags = MAP_PRIVATE | MAP_ANON;
+  int MmapProt;
+  if (Flags & MAP_NOACCESS) {
+    MmapFlags |= MAP_NORESERVE;
+    MmapProt = PROT_NONE;
+  } else {
+    MmapProt = PROT_READ | PROT_WRITE;
+  }
+  if (Addr) {
+    // Currently no scenario for a noaccess mapping with a fixed address.
+    DCHECK_EQ(Flags & MAP_NOACCESS, 0);
+    MmapFlags |= MAP_FIXED;
+  }
+  void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
+  if (P == MAP_FAILED) {
+    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+      dieOnMapUnmapError(errno == ENOMEM);
+    return nullptr;
+  }
+#if SCUDO_ANDROID
+  if (!(Flags & MAP_NOACCESS))
+    prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#endif
+  return P;
+}
+
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+           UNUSED MapPlatformData *Data) {
+  if (munmap(Addr, Size) != 0)
+    dieOnMapUnmapError();
+}
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+                      UNUSED MapPlatformData *Data) {
+  void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
+  while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+  }
+}
+
+// Calling getenv should be fine (c)(tm) at any time.
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+  return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+  u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+  if (V == Unlocked)
+    return;
+  if (V != Sleeping)
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  while (V != Unlocked) {
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+            nullptr, nullptr, 0);
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  }
+}
+
+void HybridMutex::unlock() {
+  if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+    atomic_store(&M, Unlocked, memory_order_release);
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+            nullptr, nullptr, 0);
+  }
+}
+
+u64 getMonotonicTime() {
+  timespec TS;
+  clock_gettime(CLOCK_MONOTONIC, &TS);
+  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+         static_cast<u64>(TS.tv_nsec);
+}
+
+u32 getNumberOfCPUs() {
+  cpu_set_t CPUs;
+  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
+  return static_cast<u32>(CPU_COUNT(&CPUs));
+}
+
+// Blocking is possibly unused if the getrandom block is not compiled in.
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+  if (!Buffer || !Length || Length > MaxRandomLength)
+    return false;
+  ssize_t ReadBytes;
+#if defined(SYS_getrandom)
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+  // Up to 256 bytes, getrandom will not be interrupted.
+  ReadBytes =
+      syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
+  if (ReadBytes == static_cast<ssize_t>(Length))
+    return true;
+#endif // defined(SYS_getrandom)
+  // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+  // Blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
+  const int FileDesc = open("/dev/urandom", O_RDONLY);
+  if (FileDesc == -1)
+    return false;
+  ReadBytes = read(FileDesc, Buffer, Length);
+  close(FileDesc);
+  return (ReadBytes == static_cast<ssize_t>(Length));
+}
+
+void outputRaw(const char *Buffer) {
+  static HybridMutex Mutex;
+  ScopedLock L(Mutex);
+  write(2, Buffer, strlen(Buffer));
+}
+
+extern "C" WEAK void android_set_abort_message(const char *);
+
+void setAbortMessage(const char *Message) {
+  if (&android_set_abort_message)
+    android_set_abort_message(Message);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
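
getRandom() is the building block used later (e.g. by the primary allocators)
to seed pointer shuffling; a hedged sketch of the usual non-blocking call with
a weak time-based fallback (the function name is illustrative):

// Illustrative only.
static scudo::u32 getExampleSeed() {
  scudo::u32 Seed;
  if (!scudo::getRandom(&Seed, sizeof(Seed), /*Blocking=*/false))
    Seed = static_cast<scudo::u32>(scudo::getMonotonicTime() ^
                                   reinterpret_cast<scudo::uptr>(&Seed));
  return Seed;
}
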
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/linux.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/linux.h
new file mode 100644
index 0000000..92c9eb5
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/linux.h
@@ -0,0 +1,70 @@
+//===-- linux.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LINUX_H_
+#define SCUDO_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+namespace scudo {
+
+// MapPlatformData is unused on Linux, define it as a minimally sized structure.
+struct MapPlatformData {};
+
+#if SCUDO_ANDROID
+
+#if defined(__aarch64__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("mrs %0, tpidr_el0" : "=r"(__v));                                  \
+    __v;                                                                       \
+  })
+#elif defined(__arm__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v));                         \
+    __v;                                                                       \
+  })
+#elif defined(__i386__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("movl %%gs:0, %0" : "=r"(__v));                                    \
+    __v;                                                                       \
+  })
+#elif defined(__x86_64__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("mov %%fs:0, %0" : "=r"(__v));                                     \
+    __v;                                                                       \
+  })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for sanitizers starting
+// with Q, given that Android currently doesn't support ELF TLS. It is used to
+// store sanitizer thread specific data.
+static const int TLS_SLOT_SANITIZER = 8; // TODO(kostyak): 6 for Q!!
+
+ALWAYS_INLINE uptr *getAndroidTlsPtr() {
+  return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
+}
+
+#endif // SCUDO_ANDROID
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_LINUX_H_
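
On Android, getAndroidTlsPtr() exposes the sanitizer TLS slot so the allocator
can keep a per-thread pointer without ELF TLS. A hedged sketch of how such a
slot can be used (ThreadState is a hypothetical type, not part of this header):

#if SCUDO_ANDROID
// Illustrative only.
struct ThreadState;

static void setThreadState(ThreadState *State) {
  *scudo::getAndroidTlsPtr() = reinterpret_cast<scudo::uptr>(State);
}

static ThreadState *getThreadState() {
  return reinterpret_cast<ThreadState *>(*scudo::getAndroidTlsPtr());
}
#endif // SCUDO_ANDROID
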
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/list.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/list.h
new file mode 100644
index 0000000..139e73e
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/list.h
@@ -0,0 +1,156 @@
+//===-- list.h --------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LIST_H_
+#define SCUDO_LIST_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Intrusive POD singly-linked list.
+// An object with all zero fields should represent a valid empty list. clear()
+// should be called on all non-zero-initialized objects before using.
+template <class Item> struct IntrusiveList {
+  friend class Iterator;
+
+  void clear() {
+    First = Last = nullptr;
+    Size = 0;
+  }
+
+  bool empty() const { return Size == 0; }
+  uptr size() const { return Size; }
+
+  void push_back(Item *X) {
+    if (empty()) {
+      X->Next = nullptr;
+      First = Last = X;
+      Size = 1;
+    } else {
+      X->Next = nullptr;
+      Last->Next = X;
+      Last = X;
+      Size++;
+    }
+  }
+
+  void push_front(Item *X) {
+    if (empty()) {
+      X->Next = nullptr;
+      First = Last = X;
+      Size = 1;
+    } else {
+      X->Next = First;
+      First = X;
+      Size++;
+    }
+  }
+
+  void pop_front() {
+    DCHECK(!empty());
+    First = First->Next;
+    if (!First)
+      Last = nullptr;
+    Size--;
+  }
+
+  void extract(Item *Prev, Item *X) {
+    DCHECK(!empty());
+    DCHECK_NE(Prev, nullptr);
+    DCHECK_NE(X, nullptr);
+    DCHECK_EQ(Prev->Next, X);
+    Prev->Next = X->Next;
+    if (Last == X)
+      Last = Prev;
+    Size--;
+  }
+
+  Item *front() { return First; }
+  const Item *front() const { return First; }
+  Item *back() { return Last; }
+  const Item *back() const { return Last; }
+
+  void append_front(IntrusiveList<Item> *L) {
+    DCHECK_NE(this, L);
+    if (L->empty())
+      return;
+    if (empty()) {
+      *this = *L;
+    } else {
+      L->Last->Next = First;
+      First = L->First;
+      Size += L->size();
+    }
+    L->clear();
+  }
+
+  void append_back(IntrusiveList<Item> *L) {
+    DCHECK_NE(this, L);
+    if (L->empty())
+      return;
+    if (empty()) {
+      *this = *L;
+    } else {
+      Last->Next = L->First;
+      Last = L->Last;
+      Size += L->size();
+    }
+    L->clear();
+  }
+
+  void checkConsistency() {
+    if (Size == 0) {
+      CHECK_EQ(First, 0);
+      CHECK_EQ(Last, 0);
+    } else {
+      uptr count = 0;
+      for (Item *I = First;; I = I->Next) {
+        count++;
+        if (I == Last)
+          break;
+      }
+      CHECK_EQ(size(), count);
+      CHECK_EQ(Last->Next, 0);
+    }
+  }
+
+  template <class ItemT> class IteratorBase {
+  public:
+    explicit IteratorBase(ItemT *CurrentItem) : Current(CurrentItem) {}
+    IteratorBase &operator++() {
+      Current = Current->Next;
+      return *this;
+    }
+    bool operator!=(IteratorBase Other) const {
+      return Current != Other.Current;
+    }
+    ItemT &operator*() { return *Current; }
+
+  private:
+    ItemT *Current;
+  };
+
+  typedef IteratorBase<Item> Iterator;
+  typedef IteratorBase<const Item> ConstIterator;
+
+  Iterator begin() { return Iterator(First); }
+  Iterator end() { return Iterator(nullptr); }
+
+  ConstIterator begin() const { return ConstIterator(First); }
+  ConstIterator end() const { return ConstIterator(nullptr); }
+
+private:
+  uptr Size;
+  Item *First;
+  Item *Last;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LIST_H_
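
Since the list is intrusive, elements carry their own Next pointer; a minimal
usage sketch (ExampleNode and the payload values are illustrative):

// Illustrative only.
struct ExampleNode {
  ExampleNode *Next; // Required by IntrusiveList.
  scudo::uptr Payload;
};

static scudo::uptr sumExampleList() {
  scudo::IntrusiveList<ExampleNode> List;
  List.clear(); // Required for non-zero-initialized objects.
  ExampleNode A = {nullptr, 1}, B = {nullptr, 2};
  List.push_back(&A);  // List: A
  List.push_front(&B); // List: B -> A
  scudo::uptr Sum = 0;
  for (ExampleNode &N : List)
    Sum += N.Payload;
  return Sum; // 3
}
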
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
new file mode 100644
index 0000000..2acc288
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -0,0 +1,181 @@
+//===-- local_cache.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LOCAL_CACHE_H_
+#define SCUDO_LOCAL_CACHE_H_
+
+#include "internal_defs.h"
+#include "report.h"
+#include "stats.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
+  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+
+  struct TransferBatch {
+    static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+    void setFromArray(void **Array, u32 N) {
+      DCHECK_LE(N, MaxNumCached);
+      for (u32 I = 0; I < N; I++)
+        Batch[I] = Array[I];
+      Count = N;
+    }
+    void clear() { Count = 0; }
+    void add(void *P) {
+      DCHECK_LT(Count, MaxNumCached);
+      Batch[Count++] = P;
+    }
+    void copyToArray(void **Array) const {
+      for (u32 I = 0; I < Count; I++)
+        Array[I] = Batch[I];
+    }
+    u32 getCount() const { return Count; }
+    void *get(u32 I) const {
+      DCHECK_LE(I, Count);
+      return Batch[I];
+    }
+    static u32 getMaxCached(uptr Size) {
+      return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
+    }
+    TransferBatch *Next;
+
+  private:
+    u32 Count;
+    void *Batch[MaxNumCached];
+  };
+
+  void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
+    Stats.initLinkerInitialized();
+    if (S)
+      S->link(&Stats);
+    Allocator = A;
+  }
+
+  void init(GlobalStats *S, SizeClassAllocator *A) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(S, A);
+  }
+
+  void destroy(GlobalStats *S) {
+    drain();
+    if (S)
+      S->unlink(&Stats);
+  }
+
+  void *allocate(uptr ClassId) {
+    CHECK_LT(ClassId, NumClasses);
+    PerClass *C = &PerClassArray[ClassId];
+    if (C->Count == 0) {
+      if (UNLIKELY(!refill(C, ClassId)))
+        return nullptr;
+      DCHECK_GT(C->Count, 0);
+    }
+    // We read ClassSize first before accessing Chunks because it's adjacent to
+    // Count, while Chunks might be further off (depending on Count). That keeps
+    // the memory accesses in close quarters.
+    const uptr ClassSize = C->ClassSize;
+    void *P = C->Chunks[--C->Count];
+    // The jury is still out as to whether any kind of PREFETCH here increases
+    // performance. It definitely decreases performance on Android though.
+    // if (!SCUDO_ANDROID) PREFETCH(P);
+    Stats.add(StatAllocated, ClassSize);
+    return P;
+  }
+
+  void deallocate(uptr ClassId, void *P) {
+    CHECK_LT(ClassId, NumClasses);
+    PerClass *C = &PerClassArray[ClassId];
+    // We still have to initialize the cache in the event that the first heap
+    // operation in a thread is a deallocation.
+    initCacheMaybe(C);
+    if (C->Count == C->MaxCount)
+      drain(C, ClassId);
+    // See comment in allocate() about memory accesses.
+    const uptr ClassSize = C->ClassSize;
+    C->Chunks[C->Count++] = P;
+    Stats.sub(StatAllocated, ClassSize);
+  }
+
+  void drain() {
+    for (uptr I = 0; I < NumClasses; I++) {
+      PerClass *C = &PerClassArray[I];
+      while (C->Count > 0)
+        drain(C, I);
+    }
+  }
+
+  TransferBatch *createBatch(uptr ClassId, void *B) {
+    if (ClassId != SizeClassMap::BatchClassId)
+      B = allocate(SizeClassMap::BatchClassId);
+    return reinterpret_cast<TransferBatch *>(B);
+  }
+
+  LocalStats &getStats() { return Stats; }
+
+private:
+  static const uptr NumClasses = SizeClassMap::NumClasses;
+  struct PerClass {
+    u32 Count;
+    u32 MaxCount;
+    uptr ClassSize;
+    void *Chunks[2 * TransferBatch::MaxNumCached];
+  };
+  PerClass PerClassArray[NumClasses];
+  LocalStats Stats;
+  SizeClassAllocator *Allocator;
+
+  ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
+    if (LIKELY(C->MaxCount))
+      return;
+    initCache();
+    DCHECK_NE(C->MaxCount, 0U);
+  }
+
+  NOINLINE void initCache() {
+    for (uptr I = 0; I < NumClasses; I++) {
+      PerClass *P = &PerClassArray[I];
+      const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+      P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
+      P->ClassSize = Size;
+    }
+  }
+
+  void destroyBatch(uptr ClassId, void *B) {
+    if (ClassId != SizeClassMap::BatchClassId)
+      deallocate(SizeClassMap::BatchClassId, B);
+  }
+
+  NOINLINE bool refill(PerClass *C, uptr ClassId) {
+    initCacheMaybe(C);
+    TransferBatch *B = Allocator->popBatch(this, ClassId);
+    if (UNLIKELY(!B))
+      return false;
+    DCHECK_GT(B->getCount(), 0);
+    B->copyToArray(C->Chunks);
+    C->Count = B->getCount();
+    destroyBatch(ClassId, B);
+    return true;
+  }
+
+  NOINLINE void drain(PerClass *C, uptr ClassId) {
+    const u32 Count = Min(C->MaxCount / 2, C->Count);
+    const uptr FirstIndexToDrain = C->Count - Count;
+    TransferBatch *B = createBatch(ClassId, C->Chunks[FirstIndexToDrain]);
+    if (UNLIKELY(!B))
+      reportOutOfMemory(
+          SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
+    B->setFromArray(&C->Chunks[FirstIndexToDrain], Count);
+    C->Count -= Count;
+    Allocator->pushBatch(ClassId, B);
+  }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LOCAL_CACHE_H_
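
The cache sits between a thread and a size class allocator; a hedged sketch of
the intended flow, assuming a Primary type along the lines of the
SizeClassAllocator32/64 templates and GlobalStats from stats.h:

// Illustrative only: Primary is a placeholder for a concrete size class
// allocator type.
template <class Primary>
static void cacheFlowExample(scudo::GlobalStats *Stats, Primary *P) {
  scudo::SizeClassAllocatorLocalCache<Primary> Cache;
  Cache.init(Stats, P);
  const scudo::uptr ClassId = 3U;        // Any valid, non-batch class id.
  void *Block = Cache.allocate(ClassId); // Refills from the primary if empty.
  if (Block)
    Cache.deallocate(ClassId, Block);    // Drains back to the primary if full.
  Cache.destroy(Stats);                  // Flushes remaining cached blocks.
}
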
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
new file mode 100644
index 0000000..b6dc918
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
@@ -0,0 +1,73 @@
+//===-- mutex.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MUTEX_H_
+#define SCUDO_MUTEX_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
+namespace scudo {
+
+class HybridMutex {
+public:
+  void init() { memset(this, 0, sizeof(*this)); }
+  bool tryLock();
+  NOINLINE void lock() {
+    if (tryLock())
+      return;
+      // The compiler may try to fully unroll the loop, ending up in a
+      // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+      // is large, ugly and unneeded, a compact loop is better for our purpose
+      // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+    for (u8 I = 0U; I < NumberOfTries; I++) {
+      yieldProcessor(NumberOfYields);
+      if (tryLock())
+        return;
+    }
+    lockSlow();
+  }
+  void unlock();
+
+private:
+  static constexpr u8 NumberOfTries = 10U;
+  static constexpr u8 NumberOfYields = 10U;
+
+#if SCUDO_LINUX
+  atomic_u32 M;
+#elif SCUDO_FUCHSIA
+  sync_mutex_t M;
+#endif
+
+  void lockSlow();
+};
+
+class ScopedLock {
+public:
+  explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+  ~ScopedLock() { Mutex.unlock(); }
+
+private:
+  HybridMutex &Mutex;
+
+  ScopedLock(const ScopedLock &) = delete;
+  void operator=(const ScopedLock &) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MUTEX_H_
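
Typical usage goes through the RAII ScopedLock, as in outputRaw() in linux.cc;
a minimal sketch (the mutex and counter names are illustrative):

// Illustrative only: a zero-initialized static mutex is valid; call init()
// explicitly for any other storage.
static scudo::HybridMutex ExampleMutex;
static scudo::uptr ExampleCounter;

static void incrementExampleCounter() {
  scudo::ScopedLock L(ExampleMutex); // Spins a few times, then lockSlow().
  ExampleCounter++;
}
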
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/platform.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
new file mode 100644
index 0000000..a897a56
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
@@ -0,0 +1,70 @@
+//===-- platform.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+#if defined(__linux__)
+#define SCUDO_LINUX 1
+#else
+#define SCUDO_LINUX 0
+#endif
+
+#if defined(__ANDROID__)
+#define SCUDO_ANDROID 1
+#else
+#define SCUDO_ANDROID 0
+#endif
+
+#if defined(__Fuchsia__)
+#define SCUDO_FUCHSIA 1
+#else
+#define SCUDO_FUCHSIA 0
+#endif
+
+#if __LP64__
+#define SCUDO_WORDSIZE 64U
+#else
+#define SCUDO_WORDSIZE 32U
+#endif
+
+#if SCUDO_WORDSIZE == 64U
+#define FIRST_32_SECOND_64(a, b) (b)
+#else
+#define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#ifndef SCUDO_CAN_USE_PRIMARY64
+#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
+#endif
+
+#ifndef SCUDO_MIN_ALIGNMENT_LOG
+// We force malloc-type functions to be aligned to std::max_align_t, but there
+// is no reason why the minimum alignment for all other functions can't be 8
+// bytes. Except obviously for applications making incorrect assumptions.
+// TODO(kostyak): define SCUDO_MIN_ALIGNMENT_LOG 3
+#define SCUDO_MIN_ALIGNMENT_LOG FIRST_32_SECOND_64(3, 4)
+#endif
+
+#if defined(__aarch64__)
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
+#else
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// Older gcc have issues aligning to a constexpr, and require an integer.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define SCUDO_CACHE_LINE_SIZE 128
+#else
+#define SCUDO_CACHE_LINE_SIZE 64
+#endif
+
+#define SCUDO_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+
+#endif // SCUDO_PLATFORM_H_
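
FIRST_32_SECOND_64 is the word-size selector used by the definitions above
(SCUDO_MIN_ALIGNMENT_LOG, SCUDO_MMAP_RANGE_SIZE); a trivial, illustrative use:

// Illustrative only: 1 GB of address space on 32-bit targets, 64 GB on
// 64-bit targets.
static const unsigned long long ExampleRangeSize =
    FIRST_32_SECOND_64(1ULL << 30, 1ULL << 36);
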
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
new file mode 100644
index 0000000..2b2fa8b
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
@@ -0,0 +1,401 @@
+//===-- primary32.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY32_H_
+#define SCUDO_PRIMARY32_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "report.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
+//
+// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog bytes
+// boundary, and keeps a bytemap of the mappable address space to track the size
+// class they are associated with.
+//
+// Mapped regions are split into equally sized Blocks according to the size
+// class they belong to, and the associated pointers are shuffled to prevent any
+// predictable address pattern (the predictability increases with the block
+// size).
+//
+// Regions for size class 0 are special and used to hold TransferBatches, which
+// allow the transfer of arrays of pointers from the global size class freelist
+// to the thread-specific freelist for said class, and back.
+//
+// Memory used by this allocator is never unmapped but can be partially
+// reclaimed if the platform allows for it.
+
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
+public:
+  typedef SizeClassMapT SizeClassMap;
+  // Regions should be large enough to hold the largest Block.
+  COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
+  typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
+  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+  typedef typename CacheT::TransferBatch TransferBatch;
+
+  static uptr getSizeByClassId(uptr ClassId) {
+    return (ClassId == SizeClassMap::BatchClassId)
+               ? sizeof(TransferBatch)
+               : SizeClassMap::getSizeByClassId(ClassId);
+  }
+
+  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+  void initLinkerInitialized(s32 ReleaseToOsInterval) {
+    if (SCUDO_FUCHSIA)
+      reportError("SizeClassAllocator32 is not supported on Fuchsia");
+
+    PossibleRegions.initLinkerInitialized();
+    MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
+
+    u32 Seed;
+    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+      Seed =
+          static_cast<u32>(getMonotonicTime() ^
+                           (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
+    const uptr PageSize = getPageSizeCached();
+    for (uptr I = 0; I < NumClasses; I++) {
+      SizeClassInfo *Sci = getSizeClassInfo(I);
+      Sci->RandState = getRandomU32(&Seed);
+      // See comment in the 64-bit primary about releasing smaller size classes.
+      Sci->CanRelease = (ReleaseToOsInterval > 0) &&
+                        (I != SizeClassMap::BatchClassId) &&
+                        (getSizeByClassId(I) >= (PageSize / 32));
+    }
+    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+  }
+  void init(s32 ReleaseToOsInterval) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(ReleaseToOsInterval);
+  }
+
+  void unmapTestOnly() {
+    while (NumberOfStashedRegions > 0)
+      unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+            RegionSize);
+    // TODO(kostyak): unmap the TransferBatch regions as well.
+    for (uptr I = 0; I < NumRegions; I++)
+      if (PossibleRegions[I])
+        unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
+    PossibleRegions.unmapTestOnly();
+  }
+
+  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+    DCHECK_LT(ClassId, NumClasses);
+    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+    ScopedLock L(Sci->Mutex);
+    TransferBatch *B = Sci->FreeList.front();
+    if (B)
+      Sci->FreeList.pop_front();
+    else {
+      B = populateFreeList(C, ClassId, Sci);
+      if (UNLIKELY(!B))
+        return nullptr;
+    }
+    DCHECK_GT(B->getCount(), 0);
+    Sci->Stats.PoppedBlocks += B->getCount();
+    return B;
+  }
+
+  void pushBatch(uptr ClassId, TransferBatch *B) {
+    DCHECK_LT(ClassId, NumClasses);
+    DCHECK_GT(B->getCount(), 0);
+    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+    ScopedLock L(Sci->Mutex);
+    Sci->FreeList.push_front(B);
+    Sci->Stats.PushedBlocks += B->getCount();
+    if (Sci->CanRelease)
+      releaseToOSMaybe(Sci, ClassId);
+  }
+
+  void disable() {
+    for (uptr I = 0; I < NumClasses; I++)
+      getSizeClassInfo(I)->Mutex.lock();
+  }
+
+  void enable() {
+    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+      getSizeClassInfo(I)->Mutex.unlock();
+  }
+
+  template <typename F> void iterateOverBlocks(F Callback) {
+    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+      if (PossibleRegions[I]) {
+        const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
+        const uptr From = I * RegionSize;
+        const uptr To = From + (RegionSize / BlockSize) * BlockSize;
+        for (uptr Block = From; Block < To; Block += BlockSize)
+          Callback(Block);
+      }
+  }
+
+  void printStats() {
+    // TODO(kostyak): get the RSS per region.
+    uptr TotalMapped = 0;
+    uptr PoppedBlocks = 0;
+    uptr PushedBlocks = 0;
+    for (uptr I = 0; I < NumClasses; I++) {
+      SizeClassInfo *Sci = getSizeClassInfo(I);
+      TotalMapped += Sci->AllocatedUser;
+      PoppedBlocks += Sci->Stats.PoppedBlocks;
+      PushedBlocks += Sci->Stats.PushedBlocks;
+    }
+    Printf("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
+           "remains %zu\n",
+           TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
+    for (uptr I = 0; I < NumClasses; I++)
+      printStats(I, 0);
+  }
+
+  void releaseToOS() {
+    for (uptr I = 0; I < NumClasses; I++) {
+      if (I == SizeClassMap::BatchClassId)
+        continue;
+      SizeClassInfo *Sci = getSizeClassInfo(I);
+      ScopedLock L(Sci->Mutex);
+      releaseToOSMaybe(Sci, I, /*Force=*/true);
+    }
+  }
+
+private:
+  static const uptr NumClasses = SizeClassMap::NumClasses;
+  static const uptr RegionSize = 1UL << RegionSizeLog;
+  static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
+#if SCUDO_WORDSIZE == 32U
+  typedef FlatByteMap<NumRegions> ByteMap;
+#else
+  typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap;
+#endif
+
+  struct SizeClassStats {
+    uptr PoppedBlocks;
+    uptr PushedBlocks;
+  };
+
+  struct ReleaseToOsInfo {
+    uptr PushedBlocksAtLastRelease;
+    uptr RangesReleased;
+    uptr LastReleasedBytes;
+    u64 LastReleaseAtNs;
+  };
+
+  struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
+    HybridMutex Mutex;
+    IntrusiveList<TransferBatch> FreeList;
+    SizeClassStats Stats;
+    bool CanRelease;
+    u32 RandState;
+    uptr AllocatedUser;
+    ReleaseToOsInfo ReleaseInfo;
+  };
+  COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+  uptr computeRegionId(uptr Mem) {
+    const uptr Id = Mem >> RegionSizeLog;
+    CHECK_LT(Id, NumRegions);
+    return Id;
+  }
+
+  uptr allocateRegionSlow() {
+    uptr MapSize = 2 * RegionSize;
+    const uptr MapBase = reinterpret_cast<uptr>(
+        map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
+    if (UNLIKELY(!MapBase))
+      return 0;
+    const uptr MapEnd = MapBase + MapSize;
+    uptr Region = MapBase;
+    if (isAligned(Region, RegionSize)) {
+      ScopedLock L(RegionsStashMutex);
+      if (NumberOfStashedRegions < MaxStashedRegions)
+        RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
+      else
+        MapSize = RegionSize;
+    } else {
+      Region = roundUpTo(MapBase, RegionSize);
+      unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
+      MapSize = RegionSize;
+    }
+    const uptr End = Region + MapSize;
+    if (End != MapEnd)
+      unmap(reinterpret_cast<void *>(End), MapEnd - End);
+    return Region;
+  }
+
+  uptr allocateRegion(uptr ClassId) {
+    DCHECK_LT(ClassId, NumClasses);
+    uptr Region = 0;
+    {
+      ScopedLock L(RegionsStashMutex);
+      if (NumberOfStashedRegions > 0)
+        Region = RegionsStash[--NumberOfStashedRegions];
+    }
+    if (!Region)
+      Region = allocateRegionSlow();
+    if (LIKELY(Region)) {
+      if (ClassId) {
+        const uptr RegionIndex = computeRegionId(Region);
+        if (RegionIndex < MinRegionIndex)
+          MinRegionIndex = RegionIndex;
+        if (RegionIndex > MaxRegionIndex)
+          MaxRegionIndex = RegionIndex;
+        PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
+      }
+    }
+    return Region;
+  }
+
+  SizeClassInfo *getSizeClassInfo(uptr ClassId) {
+    DCHECK_LT(ClassId, NumClasses);
+    return &SizeClassInfoArray[ClassId];
+  }
+
+  bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
+                       TransferBatch **CurrentBatch, u32 MaxCount,
+                       void **PointersArray, u32 Count) {
+    if (ClassId != SizeClassMap::BatchClassId)
+      shuffle(PointersArray, Count, &Sci->RandState);
+    TransferBatch *B = *CurrentBatch;
+    for (uptr I = 0; I < Count; I++) {
+      if (B && B->getCount() == MaxCount) {
+        Sci->FreeList.push_back(B);
+        B = nullptr;
+      }
+      if (!B) {
+        B = C->createBatch(ClassId, PointersArray[I]);
+        if (UNLIKELY(!B))
+          return false;
+        B->clear();
+      }
+      B->add(PointersArray[I]);
+    }
+    *CurrentBatch = B;
+    return true;
+  }
+
+  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+                                           SizeClassInfo *Sci) {
+    const uptr Region = allocateRegion(ClassId);
+    if (UNLIKELY(!Region))
+      return nullptr;
+    C->getStats().add(StatMapped, RegionSize);
+    const uptr Size = getSizeByClassId(ClassId);
+    const u32 MaxCount = TransferBatch::getMaxCached(Size);
+    DCHECK_GT(MaxCount, 0);
+    const uptr NumberOfBlocks = RegionSize / Size;
+    DCHECK_GT(NumberOfBlocks, 0);
+    TransferBatch *B = nullptr;
+    constexpr uptr ShuffleArraySize = 48;
+    void *ShuffleArray[ShuffleArraySize];
+    u32 Count = 0;
+    const uptr AllocatedUser = NumberOfBlocks * Size;
+    for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
+      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+      if (Count == ShuffleArraySize) {
+        if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
+                                      ShuffleArray, Count)))
+          return nullptr;
+        Count = 0;
+      }
+    }
+    if (Count) {
+      if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
+                                    Count)))
+        return nullptr;
+    }
+    DCHECK(B);
+    DCHECK_GT(B->getCount(), 0);
+    Sci->AllocatedUser += AllocatedUser;
+    if (Sci->CanRelease)
+      Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+    return B;
+  }
+
+  void printStats(uptr ClassId, uptr Rss) {
+    SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+    if (Sci->AllocatedUser == 0)
+      return;
+    const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+    const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
+    Printf("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: %6zu"
+           " avail: %6zu rss: %6zuK\n",
+           ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
+           Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
+           AvailableChunks, Rss >> 10);
+  }
+
+  NOINLINE void releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+                                 bool Force = false) {
+    const uptr BlockSize = getSizeByClassId(ClassId);
+    const uptr PageSize = getPageSizeCached();
+
+    CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+    const uptr N = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+    if (N * BlockSize < PageSize)
+      return; // No chance to release anything.
+    if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
+            BlockSize <
+        PageSize) {
+      return; // Nothing new to release.
+    }
+
+    if (!Force) {
+      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      if (IntervalMs < 0)
+        return;
+      if (Sci->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+          getMonotonicTime()) {
+        return; // Memory was returned recently.
+      }
+    }
+
+    // TODO(kostyak): currently not ideal as we loop over all regions and
+    // iterate multiple times over the same freelist if a ClassId spans multiple
+    // regions. But it will have to do for now.
+    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
+      if (PossibleRegions[I] == ClassId) {
+        ReleaseRecorder Recorder(I * RegionSize);
+        releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
+                              RegionSize / PageSize, BlockSize, &Recorder);
+        if (Recorder.getReleasedRangesCount() > 0) {
+          Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+          Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+          Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+        }
+      }
+    }
+    Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+  }
+
+  SizeClassInfo SizeClassInfoArray[NumClasses];
+
+  ByteMap PossibleRegions;
+  // Keep track of the lowest & highest regions allocated to avoid looping
+  // through the whole NumRegions.
+  uptr MinRegionIndex;
+  uptr MaxRegionIndex;
+  s32 ReleaseToOsIntervalMs;
+  // Unless several threads request regions simultaneously from different size
+  // classes, the stash rarely contains more than 1 entry.
+  static constexpr uptr MaxStashedRegions = 4;
+  HybridMutex RegionsStashMutex;
+  uptr NumberOfStashedRegions;
+  uptr RegionsStash[MaxStashedRegions];
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY32_H_
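The over-map-and-trim trick used by allocateRegionSlow() above can be illustrated with a standalone sketch: reserve twice the region size, round the base up to the next RegionSize boundary (a no-op if it is already aligned), and return the surplus head and tail to the kernel. The sketch uses plain mmap/munmap rather than scudo's map()/unmap() wrappers, omits the region stash optimization, and the mapAlignedRegion name is invented for illustration.

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Returns a RegionSize-aligned mapping of RegionSize bytes, or nullptr on
// failure. RegionSize must be a power of two and a multiple of the page size.
static void *mapAlignedRegion(size_t RegionSize) {
  const size_t MapSize = 2 * RegionSize;
  void *MapBase = mmap(nullptr, MapSize, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (MapBase == MAP_FAILED)
    return nullptr;
  const uintptr_t Base = reinterpret_cast<uintptr_t>(MapBase);
  // Round up to the next RegionSize boundary, then trim the unused head and
  // tail of the oversized mapping so that exactly one aligned region remains.
  const uintptr_t Region = (Base + RegionSize - 1) & ~(RegionSize - 1);
  if (Region != Base)
    munmap(MapBase, Region - Base);
  const uintptr_t End = Region + RegionSize;
  const uintptr_t MapEnd = Base + MapSize;
  if (End != MapEnd)
    munmap(reinterpret_cast<void *>(End), MapEnd - End);
  return reinterpret_cast<void *>(Region);
}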
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
new file mode 100644
index 0000000..035182b
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
@@ -0,0 +1,381 @@
+//===-- primary64.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY64_H_
+#define SCUDO_PRIMARY64_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
+//
+// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
+// into Regions, each specific to a size class. Note that the base of that
+// mapping is random (based on the platform-specific map() capabilities), and
+// that each Region actually starts at a random offset from its base.
+//
+// Regions are mapped incrementally on demand to fulfill allocation requests,
+// those mappings being split into equally sized Blocks based on the size class
+// they belong to. The Blocks created are shuffled to prevent predictable
+// address patterns (the predictability increases with the size of the Blocks).
+//
+// The 1st Region (for size class 0) holds the TransferBatches. This is a
+// structure used to transfer arrays of available pointers from the size class
+// freelist to the thread-specific freelist, and back.
+//
+// The memory used by this allocator is never unmapped, but can be partially
+// released if the platform allows for it.
+
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
+public:
+  typedef SizeClassMapT SizeClassMap;
+  typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog> ThisT;
+  typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+  typedef typename CacheT::TransferBatch TransferBatch;
+
+  static uptr getSizeByClassId(uptr ClassId) {
+    return (ClassId == SizeClassMap::BatchClassId)
+               ? sizeof(TransferBatch)
+               : SizeClassMap::getSizeByClassId(ClassId);
+  }
+
+  static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+  void initLinkerInitialized(s32 ReleaseToOsInterval) {
+    // Reserve the space required for the Primary.
+    PrimaryBase = reinterpret_cast<uptr>(
+        map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));
+
+    RegionInfoArray = reinterpret_cast<RegionInfo *>(
+        map(nullptr, sizeof(RegionInfo) * NumClasses, "scudo:regioninfo"));
+    DCHECK_EQ(reinterpret_cast<uptr>(RegionInfoArray) % SCUDO_CACHE_LINE_SIZE,
+              0);
+
+    u32 Seed;
+    if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+      Seed = static_cast<u32>(getMonotonicTime() ^ (PrimaryBase >> 12));
+    const uptr PageSize = getPageSizeCached();
+    for (uptr I = 0; I < NumClasses; I++) {
+      RegionInfo *Region = getRegionInfo(I);
+      // The actual start of a region is offset by a random number of pages.
+      Region->RegionBeg =
+          getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
+      // Releasing smaller size classes doesn't necessarily yield a meaningful
+      // RSS impact: there are more blocks per page, they are randomized
+      // around, and thus pages are less likely to be entirely empty. On top
+      // of this, attempting to release them requires more iterations and
+      // memory accesses, which ends up being fairly costly. The current
+      // lower limit is mostly arbitrary and based on empirical observations.
+      // TODO(kostyak): make the lower limit a runtime option
+      Region->CanRelease = (ReleaseToOsInterval > 0) &&
+                           (I != SizeClassMap::BatchClassId) &&
+                           (getSizeByClassId(I) >= (PageSize / 32));
+      Region->RandState = getRandomU32(&Seed);
+    }
+    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+  }
+  void init(s32 ReleaseToOsInterval) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(ReleaseToOsInterval);
+  }
+
+  void unmapTestOnly() {
+    unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
+    unmap(reinterpret_cast<void *>(RegionInfoArray),
+          sizeof(RegionInfo) * NumClasses);
+  }
+
+  TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+    DCHECK_LT(ClassId, NumClasses);
+    RegionInfo *Region = getRegionInfo(ClassId);
+    ScopedLock L(Region->Mutex);
+    TransferBatch *B = Region->FreeList.front();
+    if (B)
+      Region->FreeList.pop_front();
+    else {
+      B = populateFreeList(C, ClassId, Region);
+      if (UNLIKELY(!B))
+        return nullptr;
+    }
+    DCHECK_GT(B->getCount(), 0);
+    Region->Stats.PoppedBlocks += B->getCount();
+    return B;
+  }
+
+  void pushBatch(uptr ClassId, TransferBatch *B) {
+    DCHECK_GT(B->getCount(), 0);
+    RegionInfo *Region = getRegionInfo(ClassId);
+    ScopedLock L(Region->Mutex);
+    Region->FreeList.push_front(B);
+    Region->Stats.PushedBlocks += B->getCount();
+    if (Region->CanRelease)
+      releaseToOSMaybe(Region, ClassId);
+  }
+
+  void disable() {
+    for (uptr I = 0; I < NumClasses; I++)
+      getRegionInfo(I)->Mutex.lock();
+  }
+
+  void enable() {
+    for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+      getRegionInfo(I)->Mutex.unlock();
+  }
+
+  template <typename F> void iterateOverBlocks(F Callback) const {
+    for (uptr I = 1; I < NumClasses; I++) {
+      const RegionInfo *Region = getRegionInfo(I);
+      const uptr BlockSize = getSizeByClassId(I);
+      const uptr From = Region->RegionBeg;
+      const uptr To = From + Region->AllocatedUser;
+      for (uptr Block = From; Block < To; Block += BlockSize)
+        Callback(Block);
+    }
+  }
+
+  void printStats() const {
+    // TODO(kostyak): get the RSS per region.
+    uptr TotalMapped = 0;
+    uptr PoppedBlocks = 0;
+    uptr PushedBlocks = 0;
+    for (uptr I = 0; I < NumClasses; I++) {
+      RegionInfo *Region = getRegionInfo(I);
+      if (Region->MappedUser)
+        TotalMapped += Region->MappedUser;
+      PoppedBlocks += Region->Stats.PoppedBlocks;
+      PushedBlocks += Region->Stats.PushedBlocks;
+    }
+    Printf("Stats: Primary64: %zuM mapped (%zuM rss) in %zu allocations; "
+           "remains %zu\n",
+           TotalMapped >> 20, static_cast<uptr>(0), PoppedBlocks,
+           PoppedBlocks - PushedBlocks);
+
+    for (uptr I = 0; I < NumClasses; I++)
+      printStats(I, 0);
+  }
+
+  void releaseToOS() {
+    for (uptr I = 0; I < NumClasses; I++) {
+      if (I == SizeClassMap::BatchClassId)
+        continue;
+      RegionInfo *Region = getRegionInfo(I);
+      ScopedLock L(Region->Mutex);
+      releaseToOSMaybe(Region, I, /*Force=*/true);
+    }
+  }
+
+private:
+  static const uptr RegionSize = 1UL << RegionSizeLog;
+  static const uptr NumClasses = SizeClassMap::NumClasses;
+  static const uptr PrimarySize = RegionSize * NumClasses;
+
+  // Call map for user memory with at least this size.
+  static const uptr MapSizeIncrement = 1UL << 16;
+
+  struct RegionStats {
+    uptr PoppedBlocks;
+    uptr PushedBlocks;
+  };
+
+  struct ReleaseToOsInfo {
+    uptr PushedBlocksAtLastRelease;
+    uptr RangesReleased;
+    uptr LastReleasedBytes;
+    u64 LastReleaseAtNs;
+  };
+
+  struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
+    HybridMutex Mutex;
+    IntrusiveList<TransferBatch> FreeList;
+    RegionStats Stats;
+    bool CanRelease;
+    bool Exhausted;
+    u32 RandState;
+    uptr RegionBeg;
+    uptr MappedUser;    // Bytes mapped for user memory.
+    uptr AllocatedUser; // Bytes allocated for user memory.
+    MapPlatformData Data;
+    ReleaseToOsInfo ReleaseInfo;
+  };
+  COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+  uptr PrimaryBase;
+  RegionInfo *RegionInfoArray;
+  MapPlatformData Data;
+  s32 ReleaseToOsIntervalMs;
+
+  RegionInfo *getRegionInfo(uptr ClassId) const {
+    DCHECK_LT(ClassId, NumClasses);
+    return &RegionInfoArray[ClassId];
+  }
+
+  uptr getRegionBaseByClassId(uptr ClassId) const {
+    return PrimaryBase + (ClassId << RegionSizeLog);
+  }
+
+  bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
+                       TransferBatch **CurrentBatch, u32 MaxCount,
+                       void **PointersArray, u32 Count) {
+    // No need to shuffle the blocks of the batch size class.
+    if (ClassId != SizeClassMap::BatchClassId)
+      shuffle(PointersArray, Count, &Region->RandState);
+    TransferBatch *B = *CurrentBatch;
+    for (uptr I = 0; I < Count; I++) {
+      if (B && B->getCount() == MaxCount) {
+        Region->FreeList.push_back(B);
+        B = nullptr;
+      }
+      if (!B) {
+        B = C->createBatch(ClassId, PointersArray[I]);
+        if (UNLIKELY(!B))
+          return false;
+        B->clear();
+      }
+      B->add(PointersArray[I]);
+    }
+    *CurrentBatch = B;
+    return true;
+  }
+
+  NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+                                           RegionInfo *Region) {
+    const uptr Size = getSizeByClassId(ClassId);
+    const u32 MaxCount = TransferBatch::getMaxCached(Size);
+
+    const uptr RegionBeg = Region->RegionBeg;
+    const uptr MappedUser = Region->MappedUser;
+    const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
+    // Map more space for blocks, if necessary.
+    if (LIKELY(TotalUserBytes > MappedUser)) {
+      // Do the mmap for the user memory.
+      const uptr UserMapSize =
+          roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+      const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
+      if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
+        if (!Region->Exhausted) {
+          Region->Exhausted = true;
+          printStats();
+          Printf(
+              "Scudo OOM: The process has Exhausted %zuM for size class %zu.\n",
+              RegionSize >> 20, Size);
+        }
+        return nullptr;
+      }
+      if (MappedUser == 0)
+        Region->Data = Data;
+      if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
+                        UserMapSize, "scudo:primary",
+                        MAP_ALLOWNOMEM | MAP_RESIZABLE, &Region->Data)))
+        return nullptr;
+      Region->MappedUser += UserMapSize;
+      C->getStats().add(StatMapped, UserMapSize);
+    }
+
+    const uptr NumberOfBlocks = Min(
+        8UL * MaxCount, (Region->MappedUser - Region->AllocatedUser) / Size);
+    DCHECK_GT(NumberOfBlocks, 0);
+
+    TransferBatch *B = nullptr;
+    constexpr uptr ShuffleArraySize = 48;
+    void *ShuffleArray[ShuffleArraySize];
+    u32 Count = 0;
+    const uptr P = RegionBeg + Region->AllocatedUser;
+    const uptr AllocatedUser = NumberOfBlocks * Size;
+    for (uptr I = P; I < P + AllocatedUser; I += Size) {
+      ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+      if (Count == ShuffleArraySize) {
+        if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+                                      ShuffleArray, Count)))
+          return nullptr;
+        Count = 0;
+      }
+    }
+    if (Count) {
+      if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+                                    ShuffleArray, Count)))
+        return nullptr;
+    }
+    DCHECK(B);
+    CHECK_GT(B->getCount(), 0);
+
+    Region->AllocatedUser += AllocatedUser;
+    Region->Exhausted = false;
+    if (Region->CanRelease)
+      Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
+    return B;
+  }
+
+  void printStats(uptr ClassId, uptr Rss) const {
+    RegionInfo *Region = getRegionInfo(ClassId);
+    if (Region->MappedUser == 0)
+      return;
+    const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+    const uptr AvailableChunks =
+        Region->AllocatedUser / getSizeByClassId(ClassId);
+    Printf("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: "
+           "%6zu avail: %6zu rss: %6zuK releases: %6zu last released: %6zuK "
+           "region: 0x%zx (0x%zx)\n",
+           Region->Exhausted ? "F" : " ", ClassId, getSizeByClassId(ClassId),
+           Region->MappedUser >> 10, Region->Stats.PoppedBlocks,
+           Region->Stats.PushedBlocks, InUse, AvailableChunks, Rss >> 10,
+           Region->ReleaseInfo.RangesReleased,
+           Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
+           getRegionBaseByClassId(ClassId));
+  }
+
+  NOINLINE void releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
+                                 bool Force = false) {
+    const uptr BlockSize = getSizeByClassId(ClassId);
+    const uptr PageSize = getPageSizeCached();
+
+    CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
+    const uptr N = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+    if (N * BlockSize < PageSize)
+      return; // No chance to release anything.
+    if ((Region->Stats.PushedBlocks -
+         Region->ReleaseInfo.PushedBlocksAtLastRelease) *
+            BlockSize <
+        PageSize) {
+      return; // Nothing new to release.
+    }
+
+    if (!Force) {
+      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      if (IntervalMs < 0)
+        return;
+      if (Region->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+          getMonotonicTime()) {
+        return; // Memory was returned recently.
+      }
+    }
+
+    ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
+    releaseFreeMemoryToOS(&Region->FreeList, Region->RegionBeg,
+                          roundUpTo(Region->AllocatedUser, PageSize) / PageSize,
+                          BlockSize, &Recorder);
+
+    if (Recorder.getReleasedRangesCount() > 0) {
+      Region->ReleaseInfo.PushedBlocksAtLastRelease =
+          Region->Stats.PushedBlocks;
+      Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+      Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+    }
+    Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+  }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY64_H_
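The fixed per-class layout described in the header comment (class I owning the RegionSize bytes at PrimaryBase + I * RegionSize) reduces to shift arithmetic in both directions. The following self-contained example uses made-up values for PrimaryBase, RegionSizeLog and ClassId purely for illustration; none of them come from an actual configuration.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t RegionSizeLog = 30;           // pretend 1 GiB regions
  const uint64_t PrimaryBase = 0x500000000000; // pretend result of map()
  const uint64_t ClassId = 7;
  // Forward mapping: each class owns a fixed slice of the reserved range.
  const uint64_t RegionBase = PrimaryBase + (ClassId << RegionSizeLog);
  // Reverse mapping: a block's offset from PrimaryBase shifts back to the
  // region (and hence the class) it belongs to.
  const uint64_t SomeBlock = RegionBase + 5 * 4096 + 176;
  const uint64_t Recovered = (SomeBlock - PrimaryBase) >> RegionSizeLog;
  std::printf("region base 0x%llx, block 0x%llx -> class %llu\n",
              (unsigned long long)RegionBase, (unsigned long long)SomeBlock,
              (unsigned long long)Recovered);
  return Recovered == ClassId ? 0 : 1;
}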
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
new file mode 100644
index 0000000..bac36e0
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -0,0 +1,289 @@
+//===-- quarantine.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_QUARANTINE_H_
+#define SCUDO_QUARANTINE_H_
+
+#include "list.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+struct QuarantineBatch {
+  // With the following count, a batch (and the header that protects it)
+  // occupies 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit.
+  static const u32 MaxCount = 1019;
+  QuarantineBatch *Next;
+  uptr Size;
+  u32 Count;
+  void *Batch[MaxCount];
+
+  void init(void *Ptr, uptr Size) {
+    Count = 1;
+    Batch[0] = Ptr;
+    this->Size = Size + sizeof(QuarantineBatch); // Account for the Batch Size.
+  }
+
+  // The total size of quarantined nodes recorded in this batch.
+  uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
+
+  void push_back(void *Ptr, uptr Size) {
+    DCHECK_LT(Count, MaxCount);
+    Batch[Count++] = Ptr;
+    this->Size += Size;
+  }
+
+  bool canMerge(const QuarantineBatch *const From) const {
+    return Count + From->Count <= MaxCount;
+  }
+
+  void merge(QuarantineBatch *const From) {
+    DCHECK_LE(Count + From->Count, MaxCount);
+    DCHECK_GE(Size, sizeof(QuarantineBatch));
+
+    for (uptr I = 0; I < From->Count; ++I)
+      Batch[Count + I] = From->Batch[I];
+    Count += From->Count;
+    Size += From->getQuarantinedSize();
+
+    From->Count = 0;
+    From->Size = sizeof(QuarantineBatch);
+  }
+
+  void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
+};
+
+COMPILER_CHECK(sizeof(QuarantineBatch) <= (1U << 13)); // 8Kb.
+
+// Per-thread cache of memory blocks.
+template <typename Callback> class QuarantineCache {
+public:
+  void initLinkerInitialized() {}
+  void init() {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized();
+  }
+
+  // Total memory used, including internal accounting.
+  uptr getSize() const { return atomic_load_relaxed(&Size); }
+  // Memory used for internal accounting.
+  uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
+
+  void enqueue(Callback Cb, void *Ptr, uptr Size) {
+    if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
+      QuarantineBatch *B =
+          reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
+      DCHECK(B);
+      B->init(Ptr, Size);
+      enqueueBatch(B);
+    } else {
+      List.back()->push_back(Ptr, Size);
+      addToSize(Size);
+    }
+  }
+
+  void transfer(QuarantineCache *From) {
+    List.append_back(&From->List);
+    addToSize(From->getSize());
+    atomic_store_relaxed(&From->Size, 0);
+  }
+
+  void enqueueBatch(QuarantineBatch *B) {
+    List.push_back(B);
+    addToSize(B->Size);
+  }
+
+  QuarantineBatch *dequeueBatch() {
+    if (List.empty())
+      return nullptr;
+    QuarantineBatch *B = List.front();
+    List.pop_front();
+    subFromSize(B->Size);
+    return B;
+  }
+
+  void mergeBatches(QuarantineCache *ToDeallocate) {
+    uptr ExtractedSize = 0;
+    QuarantineBatch *Current = List.front();
+    while (Current && Current->Next) {
+      if (Current->canMerge(Current->Next)) {
+        QuarantineBatch *Extracted = Current->Next;
+        // Move all the chunks into the current batch.
+        Current->merge(Extracted);
+        DCHECK_EQ(Extracted->Count, 0);
+        DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
+        // Remove the next batch from the list and account for its Size.
+        List.extract(Current, Extracted);
+        ExtractedSize += Extracted->Size;
+        // Add it to deallocation list.
+        ToDeallocate->enqueueBatch(Extracted);
+      } else {
+        Current = Current->Next;
+      }
+    }
+    subFromSize(ExtractedSize);
+  }
+
+  void printStats() const {
+    uptr BatchCount = 0;
+    uptr TotalOverheadBytes = 0;
+    uptr TotalBytes = 0;
+    uptr TotalQuarantineChunks = 0;
+    for (const QuarantineBatch &Batch : List) {
+      BatchCount++;
+      TotalBytes += Batch.Size;
+      TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
+      TotalQuarantineChunks += Batch.Count;
+    }
+    const uptr QuarantineChunksCapacity =
+        BatchCount * QuarantineBatch::MaxCount;
+    const uptr ChunksUsagePercent =
+        (QuarantineChunksCapacity == 0)
+            ? 0
+            : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
+    const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
+    const uptr MemoryOverheadPercent =
+        (TotalQuarantinedBytes == 0)
+            ? 0
+            : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
+    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
+           "chunks: %zd (capacity: %zd); %zd%% chunks used; %zd%% memory "
+           "overhead\n",
+           BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
+           QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
+  }
+
+private:
+  IntrusiveList<QuarantineBatch> List;
+  atomic_uptr Size;
+
+  void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
+  void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
+};
+
+// The callback interface is:
+// void Callback::recycle(Node *Ptr);
+// void *Callback::allocate(uptr Size);
+// void Callback::deallocate(void *Ptr);
+template <typename Callback, typename Node> class GlobalQuarantine {
+public:
+  typedef QuarantineCache<Callback> CacheT;
+
+  void initLinkerInitialized(uptr Size, uptr CacheSize) {
+    // Thread local quarantine size can be zero only when global quarantine size
+    // is zero (it allows us to perform just one atomic read per put() call).
+    CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
+
+    atomic_store_relaxed(&MaxSize, Size);
+    atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
+    atomic_store_relaxed(&MaxCacheSize, CacheSize);
+
+    Cache.initLinkerInitialized();
+  }
+  void init(uptr Size, uptr CacheSize) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(Size, CacheSize);
+  }
+
+  uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
+  uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
+
+  void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
+    C->enqueue(Cb, Ptr, Size);
+    if (C->getSize() > getCacheSize())
+      drain(C, Cb);
+  }
+
+  void NOINLINE drain(CacheT *C, Callback Cb) {
+    {
+      ScopedLock L(CacheMutex);
+      Cache.transfer(C);
+    }
+    if (Cache.getSize() > getMaxSize() && RecyleMutex.tryLock())
+      recycle(atomic_load_relaxed(&MinSize), Cb);
+  }
+
+  void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
+    {
+      ScopedLock L(CacheMutex);
+      Cache.transfer(C);
+    }
+    RecyleMutex.lock();
+    recycle(0, Cb);
+  }
+
+  void printStats() const {
+    // It assumes that the world is stopped, just as the allocator's printStats.
+    Printf("Quarantine limits: global: %zdM; thread local: %zdK\n",
+           getMaxSize() >> 20, getCacheSize() >> 10);
+    Cache.printStats();
+  }
+
+private:
+  // Read-only data.
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
+  CacheT Cache;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecyleMutex;
+  atomic_uptr MinSize;
+  atomic_uptr MaxSize;
+  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
+
+  void NOINLINE recycle(uptr MinSize, Callback Cb) {
+    CacheT Tmp;
+    Tmp.init();
+    {
+      ScopedLock L(CacheMutex);
+      // Go over the batches and merge partially filled ones to save some
+      // memory; otherwise the batches themselves (since the memory they use
+      // is counted against the quarantine limit) can outweigh the user's
+      // actual quarantined chunks, which diminishes the purpose of the
+      // quarantine.
+      const uptr CacheSize = Cache.getSize();
+      const uptr OverheadSize = Cache.getOverheadSize();
+      DCHECK_GE(CacheSize, OverheadSize);
+      // Do the merge only when the overhead exceeds this predefined limit
+      // (might require some tuning). It saves us a merge attempt when the
+      // quarantine batch list is unlikely to contain batches suitable for
+      // merging.
+      constexpr uptr OverheadThresholdPercents = 100;
+      if (CacheSize > OverheadSize &&
+          OverheadSize * (100 + OverheadThresholdPercents) >
+              CacheSize * OverheadThresholdPercents) {
+        Cache.mergeBatches(&Tmp);
+      }
+      // Extract enough chunks from the quarantine to get below the max
+      // quarantine size and leave some leeway for the newly quarantined chunks.
+      while (Cache.getSize() > MinSize)
+        Tmp.enqueueBatch(Cache.dequeueBatch());
+    }
+    RecyleMutex.unlock();
+    doRecycle(&Tmp, Cb);
+  }
+
+  void NOINLINE doRecycle(CacheT *C, Callback Cb) {
+    while (QuarantineBatch *B = C->dequeueBatch()) {
+      const u32 Seed = static_cast<u32>(
+          (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
+      B->shuffle(Seed);
+      constexpr uptr NumberOfPrefetch = 8UL;
+      CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
+      for (uptr I = 0; I < NumberOfPrefetch; I++)
+        PREFETCH(B->Batch[I]);
+      for (uptr I = 0, Count = B->Count; I < Count; I++) {
+        if (I + NumberOfPrefetch < Count)
+          PREFETCH(B->Batch[I + NumberOfPrefetch]);
+        Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
+      }
+      Cb.deallocate(B);
+    }
+  }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_QUARANTINE_H_
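The merge condition in recycle() reads more naturally as a percentage: merge only when the per-batch header overhead exceeds OverheadThresholdPercents of the bytes actually quarantined. The helper below is a standalone restatement of that arithmetic for review purposes; shouldMergeBatches is not a function in this codebase.

#include <cstdint>

// CacheSize counts quarantined user bytes plus per-batch headers; OverheadSize
// is the header portion only. The comparison is the division-free form of
// OverheadSize / (CacheSize - OverheadSize) > ThresholdPercents / 100.
static bool shouldMergeBatches(uint64_t CacheSize, uint64_t OverheadSize,
                               uint64_t ThresholdPercents = 100) {
  return CacheSize > OverheadSize &&
         OverheadSize * (100 + ThresholdPercents) >
             CacheSize * ThresholdPercents;
}

// Example: 1600 bytes of headers against 1400 bytes of user data (CacheSize
// 3000) merges, while 1000 bytes of headers against 2000 of user data does not.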
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/release.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/release.h
new file mode 100644
index 0000000..4fe29fde
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/release.h
@@ -0,0 +1,262 @@
+//===-- release.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_RELEASE_H_
+#define SCUDO_RELEASE_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+class ReleaseRecorder {
+public:
+  ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
+      : BaseAddress(BaseAddress), Data(Data) {}
+
+  uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+  uptr getReleasedBytes() const { return ReleasedBytes; }
+
+  // Releases [From, To) range of pages back to OS.
+  void releasePageRangeToOS(uptr From, uptr To) {
+    const uptr Size = To - From;
+    releasePagesToOS(BaseAddress, From, Size, Data);
+    ReleasedRangesCount++;
+    ReleasedBytes += Size;
+  }
+
+private:
+  uptr ReleasedRangesCount = 0;
+  uptr ReleasedBytes = 0;
+  uptr BaseAddress = 0;
+  MapPlatformData *Data = nullptr;
+};
+
+// A packed array of Counters. Each counter occupies 2^N bits, enough to store
+// the counter's MaxValue. The constructor will try to allocate the required
+// Buffer via map(), and the caller is expected to check whether the
+// initialization was successful by checking the isAllocated() result. For
+// performance's sake, none of the accessors check the validity of the
+// arguments; it is assumed that Index is always in the [0, N) range and that
+// the value is not incremented past MaxValue.
+class PackedCounterArray {
+public:
+  PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
+    CHECK_GT(NumCounters, 0);
+    CHECK_GT(MaxValue, 0);
+    constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
+    // Rounding the counter storage size up to a power of two allows us to use
+    // bit shifts when calculating a particular counter's Index and offset.
+    const uptr CounterSizeBits =
+        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+    CHECK_LE(CounterSizeBits, MaxCounterBits);
+    CounterSizeBitsLog = getLog2(CounterSizeBits);
+    CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
+
+    const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
+    CHECK_GT(PackingRatio, 0);
+    PackingRatioLog = getLog2(PackingRatio);
+    BitOffsetMask = PackingRatio - 1;
+
+    BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
+                  PackingRatioLog) *
+                 sizeof(*Buffer);
+    Buffer = reinterpret_cast<uptr *>(
+        map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+  }
+  ~PackedCounterArray() {
+    if (isAllocated())
+      unmap(reinterpret_cast<void *>(Buffer), BufferSize);
+  }
+
+  bool isAllocated() const { return !!Buffer; }
+
+  uptr getCount() const { return N; }
+
+  uptr get(uptr I) const {
+    DCHECK_LT(I, N);
+    const uptr Index = I >> PackingRatioLog;
+    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+    return (Buffer[Index] >> BitOffset) & CounterMask;
+  }
+
+  void inc(uptr I) const {
+    DCHECK_LT(get(I), CounterMask);
+    const uptr Index = I >> PackingRatioLog;
+    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+    DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+    Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
+  }
+
+  void incRange(uptr From, uptr To) const {
+    DCHECK_LE(From, To);
+    for (uptr I = From; I <= To; I++)
+      inc(I);
+  }
+
+  uptr getBufferSize() const { return BufferSize; }
+
+private:
+  const uptr N;
+  uptr CounterSizeBitsLog;
+  uptr CounterMask;
+  uptr PackingRatioLog;
+  uptr BitOffsetMask;
+
+  uptr BufferSize;
+  uptr *Buffer;
+};
+
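// Illustration for review purposes (not part of the allocator): with 4-bit
// counters packed into 64-bit words, CounterSizeBitsLog is 2 and
// PackingRatioLog is 4, so the word index is I >> 4 and the bit offset inside
// that word is (I & 15) << 2. The fixed-width sketch below reproduces the same
// arithmetic without the generic parameterization.
struct Packed4BitCountersSketch {
  unsigned long long get(const unsigned long long *Words,
                         unsigned long I) const {
    return (Words[I >> 4] >> ((I & 15) << 2)) & 0xF;
  }
  void inc(unsigned long long *Words, unsigned long I) const {
    // No overflow check here; the real class DCHECKs against CounterMask.
    Words[I >> 4] += 1ULL << ((I & 15) << 2);
  }
};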
+template <class ReleaseRecorderT> class FreePagesRangeTracker {
+public:
+  explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+      : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
+
+  void processNextPage(bool Freed) {
+    if (Freed) {
+      if (!InRange) {
+        CurrentRangeStatePage = CurrentPage;
+        InRange = true;
+      }
+    } else {
+      closeOpenedRange();
+    }
+    CurrentPage++;
+  }
+
+  void finish() { closeOpenedRange(); }
+
+private:
+  void closeOpenedRange() {
+    if (InRange) {
+      Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+                                     (CurrentPage << PageSizeLog));
+      InRange = false;
+    }
+  }
+
+  ReleaseRecorderT *const Recorder;
+  const uptr PageSizeLog;
+  bool InRange = false;
+  uptr CurrentPage = 0;
+  uptr CurrentRangeStatePage = 0;
+};
+
+template <class TransferBatchT, class ReleaseRecorderT>
+NOINLINE void
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
+                      uptr AllocatedPagesCount, uptr BlockSize,
+                      ReleaseRecorderT *Recorder) {
+  const uptr PageSize = getPageSizeCached();
+
+  // Figure out the number of chunks per page and whether we can take a fast
+  // path (the number of chunks per page is the same for all pages).
+  uptr FullPagesBlockCountMax;
+  bool SameBlockCountPerPage;
+  if (BlockSize <= PageSize) {
+    if (PageSize % BlockSize == 0) {
+      // Same number of chunks per page, no cross overs.
+      FullPagesBlockCountMax = PageSize / BlockSize;
+      SameBlockCountPerPage = true;
+    } else if (BlockSize % (PageSize % BlockSize) == 0) {
+      // Some chunks are crossing page boundaries, which means that the page
+      // contains one or two partial chunks, but all pages contain the same
+      // number of chunks.
+      FullPagesBlockCountMax = PageSize / BlockSize + 1;
+      SameBlockCountPerPage = true;
+    } else {
+      // Some chunks are crossing page boundaries, which means that the page
+      // contains one or two partial chunks.
+      FullPagesBlockCountMax = PageSize / BlockSize + 2;
+      SameBlockCountPerPage = false;
+    }
+  } else {
+    if (BlockSize % PageSize == 0) {
+      // One chunk covers multiple pages, no cross overs.
+      FullPagesBlockCountMax = 1;
+      SameBlockCountPerPage = true;
+    } else {
+      // One chunk covers multiple pages, and some chunks cross page
+      // boundaries. Some pages contain one chunk, some contain two.
+      FullPagesBlockCountMax = 2;
+      SameBlockCountPerPage = false;
+    }
+  }
+
+  PackedCounterArray Counters(AllocatedPagesCount, FullPagesBlockCountMax);
+  if (!Counters.isAllocated())
+    return;
+
+  const uptr PageSizeLog = getLog2(PageSize);
+  const uptr End = Base + AllocatedPagesCount * PageSize;
+
+  // Iterate over free chunks and count how many free chunks affect each
+  // allocated page.
+  if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+    // Each chunk affects one page only.
+    for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+      for (u32 I = 0; I < (*It).getCount(); I++) {
+        const uptr P = reinterpret_cast<uptr>((*It).get(I));
+        if (P >= Base && P < End)
+          Counters.inc((P - Base) >> PageSizeLog);
+      }
+    }
+  } else {
+    // In all other cases chunks might affect more than one page.
+    for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+      for (u32 I = 0; I < (*It).getCount(); I++) {
+        const uptr P = reinterpret_cast<uptr>((*It).get(I));
+        if (P >= Base && P < End)
+          Counters.incRange((P - Base) >> PageSizeLog,
+                            (P - Base + BlockSize - 1) >> PageSizeLog);
+      }
+    }
+  }
+
+  // Iterate over pages detecting ranges of pages with chunk Counters equal
+  // to the expected number of chunks for the particular page.
+  FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
+  if (SameBlockCountPerPage) {
+    // Fast path, every page has the same number of chunks affecting it.
+    for (uptr I = 0; I < Counters.getCount(); I++)
+      RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
+  } else {
+    // Slow path: go through the pages, keeping count of how many chunks
+    // affect each page.
+    const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
+    const uptr Pnc = Pn * BlockSize;
+    // The idea is to increment the current page pointer by the first chunk
+    // size, middle portion size (the portion of the page covered by chunks
+    // except the first and the last one) and then the last chunk size, adding
+    // up the number of chunks on the current page and checking on every step
+    // whether the page boundary was crossed.
+    uptr PrevPageBoundary = 0;
+    uptr CurrentBoundary = 0;
+    for (uptr I = 0; I < Counters.getCount(); I++) {
+      const uptr PageBoundary = PrevPageBoundary + PageSize;
+      uptr BlocksPerPage = Pn;
+      if (CurrentBoundary < PageBoundary) {
+        if (CurrentBoundary > PrevPageBoundary)
+          BlocksPerPage++;
+        CurrentBoundary += Pnc;
+        if (CurrentBoundary < PageBoundary) {
+          BlocksPerPage++;
+          CurrentBoundary += BlockSize;
+        }
+      }
+      PrevPageBoundary = PageBoundary;
+
+      RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
+    }
+  }
+  RangeTracker.finish();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_RELEASE_H_
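The chunk-per-page classification at the top of releaseFreeMemoryToOS() is easiest to check with concrete numbers. The standalone sketch below mirrors only the BlockSize <= PageSize branch, using a hypothetical classify() helper and illustrative sizes: for a 4096-byte page, 48-byte blocks satisfy 48 % (4096 % 48) == 0, so every page holds at most 86 blocks and the fast path applies, while 112-byte blocks do not, so pages hold up to 38 blocks and the per-page counts differ.

#include <cassert>
#include <cstdint>

struct BlockCountInfo {
  uint64_t FullPagesBlockCountMax;
  bool SameBlockCountPerPage;
};

// Mirrors the BlockSize <= PageSize branch of releaseFreeMemoryToOS().
static BlockCountInfo classify(uint64_t PageSize, uint64_t BlockSize) {
  if (PageSize % BlockSize == 0)
    return {PageSize / BlockSize, true};     // no crossovers
  if (BlockSize % (PageSize % BlockSize) == 0)
    return {PageSize / BlockSize + 1, true}; // crossovers, same count per page
  return {PageSize / BlockSize + 2, false};  // crossovers, varying count
}

int main() {
  assert(classify(4096, 64).FullPagesBlockCountMax == 64);
  assert(classify(4096, 48).FullPagesBlockCountMax == 86 &&
         classify(4096, 48).SameBlockCountPerPage);
  assert(classify(4096, 112).FullPagesBlockCountMax == 38 &&
         !classify(4096, 112).SameBlockCountPerPage);
  return 0;
}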
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/report.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/report.cc
new file mode 100644
index 0000000..47cd951
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/report.cc
@@ -0,0 +1,192 @@
+//===-- report.cc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "report.h"
+
+#include "atomic_helpers.h"
+#include "string_utils.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedErrorReport {
+public:
+  ScopedErrorReport() : Message(512) { Message.append("Scudo ERROR: "); }
+  void append(const char *Format, ...) {
+    va_list Args;
+    va_start(Args, Format);
+    Message.append(Format, Args);
+    va_end(Args);
+  }
+  NORETURN ~ScopedErrorReport() {
+    outputRaw(Message.data());
+    setAbortMessage(Message.data());
+    die();
+  }
+
+private:
+  ScopedString Message;
+};
+
+INLINE void NORETURN trap() { __builtin_trap(); }
+
+// This could potentially be called recursively if a CHECK fails in the reports.
+void NORETURN reportCheckFailed(const char *File, int Line,
+                                const char *Condition, u64 Value1, u64 Value2) {
+  static atomic_u32 NumberOfCalls;
+  if (atomic_fetch_add(&NumberOfCalls, 1, memory_order_relaxed) > 2) {
+    // TODO(kostyak): maybe sleep here?
+    trap();
+  }
+  ScopedErrorReport Report;
+  Report.append("CHECK failed @ %s:%d %s (%llu, %llu)\n", File, Line, Condition,
+                Value1, Value2);
+}
+
+// Generic string fatal error message.
+void NORETURN reportError(const char *Message) {
+  ScopedErrorReport Report;
+  Report.append("%s\n", Message);
+}
+
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
+  ScopedErrorReport Report;
+  Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
+}
+
+// The checksum of a chunk header is invalid. This could be caused by an
+// {over,under}write of the header, or a pointer that is not an actual chunk.
+void NORETURN reportHeaderCorruption(void *Ptr) {
+  ScopedErrorReport Report;
+  Report.append("corrupted chunk header at address %p\n", Ptr);
+}
+
+// Two threads have attempted to modify a chunk header at the same time. This
+// is symptomatic of a race condition in the application code, or a general
+// lack of proper locking.
+void NORETURN reportHeaderRace(void *Ptr) {
+  ScopedErrorReport Report;
+  Report.append("race on chunk header at address %p\n", Ptr);
+}
+
+// The allocator was compiled with parameters that conflict with field size
+// requirements.
+void NORETURN reportSanityCheckError(const char *Field) {
+  ScopedErrorReport Report;
+  Report.append("maximum possible %s doesn't fit in header\n", Field);
+}
+
+// We enforce a maximum alignment, to keep fields smaller and generally prevent
+// integer overflows, or unexpected corner cases.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment) {
+  ScopedErrorReport Report;
+  Report.append("invalid allocation alignment: %zu exceeds maximum supported "
+                "alignment of %zu\n",
+                Alignment, MaxAlignment);
+}
+
+// See above, we also enforce a maximum size.
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+                                         uptr MaxSize) {
+  ScopedErrorReport Report;
+  Report.append("requested allocation size %zu (%zu after adjustments) exceeds "
+                "maximum supported size of %zu\n",
+                UserSize, TotalSize, MaxSize);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+  ScopedErrorReport Report;
+  Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
+}
+
+static const char *stringifyAction(AllocatorAction Action) {
+  switch (Action) {
+  case AllocatorAction::Recycling:
+    return "recycling";
+  case AllocatorAction::Deallocating:
+    return "deallocating";
+  case AllocatorAction::Reallocating:
+    return "reallocating";
+  case AllocatorAction::Sizing:
+    return "sizing";
+  }
+  return "<invalid action>";
+}
+
+// The chunk is not in a state congruent with the operation we want to perform.
+// This is usually the case with a double-free, or a realloc of a freed
+// pointer.
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr) {
+  ScopedErrorReport Report;
+  Report.append("invalid chunk state when %s address %p\n",
+                stringifyAction(Action), Ptr);
+}
+
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr) {
+  ScopedErrorReport Report;
+  Report.append("misaligned pointer when %s address %p\n",
+                stringifyAction(Action), Ptr);
+}
+
+// The deallocation function used is at odds with the one used to allocate the
+// chunk (e.g., new[]/delete or malloc/delete, and so on).
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+                                        u8 TypeA, u8 TypeB) {
+  ScopedErrorReport Report;
+  Report.append("allocation type mismatch when %s address %p (%d vs %d)\n",
+                stringifyAction(Action), Ptr, TypeA, TypeB);
+}
+
+// The size specified to the delete operator does not match the one that was
+// passed to new when allocating the chunk.
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size,
+                                       uptr ExpectedSize) {
+  ScopedErrorReport Report;
+  Report.append(
+      "invalid sized delete when deallocating address %p (%zu vs %zu)\n", Ptr,
+      Size, ExpectedSize);
+}
+
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment) {
+  ScopedErrorReport Report;
+  Report.append(
+      "invalid allocation alignment: %zu, alignment must be a power of two\n",
+      Alignment);
+}
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+  ScopedErrorReport Report;
+  Report.append("calloc parameters overflow: count * size (%zu * %zu) cannot "
+                "be represented with type size_t\n",
+                Count, Size);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+  ScopedErrorReport Report;
+  Report.append(
+      "invalid alignment requested in posix_memalign: %zu, alignment must be a "
+      "power of two and a multiple of sizeof(void *) == %zu\n",
+      Alignment, sizeof(void *));
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+  ScopedErrorReport Report;
+  Report.append("pvalloc parameters overflow: size %zu rounded up to system "
+                "page size %zu cannot be represented in type size_t\n",
+                Size, getPageSizeCached());
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size) {
+  ScopedErrorReport Report;
+  Report.append("invalid alignment requested in aligned_alloc: %zu, alignment "
+                "must be a power of two and the requested size %zu must be a "
+                "multiple of alignment\n",
+                Alignment, Size);
+}
+
+} // namespace scudo
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/report.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/report.h
new file mode 100644
index 0000000..14e4e79
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/report.h
@@ -0,0 +1,57 @@
+//===-- report.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_H_
+#define SCUDO_REPORT_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Reports are *fatal* unless stated otherwise.
+
+// Generic error.
+void NORETURN reportError(const char *Message);
+
+// Flags related errors.
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
+
+// Chunk header related errors.
+void NORETURN reportHeaderCorruption(void *Ptr);
+void NORETURN reportHeaderRace(void *Ptr);
+
+// Sanity checks related error.
+void NORETURN reportSanityCheckError(const char *Field);
+
+// Combined allocator errors.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+                                         uptr MaxSize);
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+enum class AllocatorAction : u8 {
+  Recycling,
+  Deallocating,
+  Reallocating,
+  Sizing,
+};
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr);
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr);
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+                                        u8 TypeA, u8 TypeB);
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size, uptr ExpectedSize);
+
+// C wrappers errors.
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size);
+
+} // namespace scudo
+
+#endif // SCUDO_REPORT_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/secondary.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/secondary.cc
new file mode 100644
index 0000000..75f9171
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/secondary.cc
@@ -0,0 +1,136 @@
+//===-- secondary.cc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "string_utils.h"
+
+namespace scudo {
+
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
+  DCHECK_GT(Size, AlignmentHint);
+  const uptr PageSize = getPageSizeCached();
+  const uptr MapSize =
+      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
+  MapPlatformData Data = {};
+  uptr MapBase =
+      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+  if (!MapBase)
+    return nullptr;
+  uptr CommitBase = MapBase + PageSize;
+  uptr MapEnd = MapBase + MapSize;
+
+  // In the unlikely event of alignments larger than a page, adjust the amount
+  // of memory we want to commit, and trim the extra memory.
+  if (AlignmentHint >= PageSize) {
+    // For alignments greater than or equal to a page, the user pointer (e.g.,
+    // the pointer that is returned by the C or C++ allocation APIs) ends up
+    // on a page boundary, and our headers will live in the preceding page.
+    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+    const uptr NewMapBase = CommitBase - PageSize;
+    DCHECK_GE(NewMapBase, MapBase);
+    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+    // are less constrained memory-wise, and that saves us two syscalls.
+    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+      MapBase = NewMapBase;
+    }
+    const uptr NewMapEnd = CommitBase + PageSize +
+                           roundUpTo((Size - AlignmentHint), PageSize) +
+                           PageSize;
+    DCHECK_LE(NewMapEnd, MapEnd);
+    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+      MapEnd = NewMapEnd;
+    }
+  }
+
+  const uptr CommitSize = MapEnd - PageSize - CommitBase;
+  const uptr Ptr =
+      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+                                 CommitSize, "scudo:secondary", 0, &Data));
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+  H->MapBase = MapBase;
+  H->MapSize = MapEnd - MapBase;
+  H->BlockEnd = CommitBase + CommitSize;
+  H->Data = Data;
+  {
+    ScopedLock L(Mutex);
+    if (!Tail) {
+      Tail = H;
+    } else {
+      Tail->Next = H;
+      H->Prev = Tail;
+      Tail = H;
+    }
+    AllocatedBytes += CommitSize;
+    if (LargestSize < CommitSize)
+      LargestSize = CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, CommitSize);
+    Stats.add(StatMapped, H->MapSize);
+  }
+  if (BlockEnd)
+    *BlockEnd = CommitBase + CommitSize;
+  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+void MapAllocator::deallocate(void *Ptr) {
+  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+  {
+    ScopedLock L(Mutex);
+    LargeBlock::Header *Prev = H->Prev;
+    LargeBlock::Header *Next = H->Next;
+    if (Prev) {
+      CHECK_EQ(Prev->Next, H);
+      Prev->Next = Next;
+    }
+    if (Next) {
+      CHECK_EQ(Next->Prev, H);
+      Next->Prev = Prev;
+    }
+    if (Tail == H) {
+      CHECK(!Next);
+      Tail = Prev;
+    } else {
+      CHECK(Next);
+    }
+    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+    FreedBytes += CommitSize;
+    NumberOfFrees++;
+    Stats.sub(StatAllocated, CommitSize);
+    Stats.sub(StatMapped, H->MapSize);
+  }
+  void *Addr = reinterpret_cast<void *>(H->MapBase);
+  const uptr Size = H->MapSize;
+  MapPlatformData Data;
+  Data = H->Data;
+  unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+void MapAllocator::printStats() const {
+  Printf("Stats: MapAllocator: allocated %zd times (%zdK), freed %zd times "
+         "(%zdK), remains %zd (%zdK) max %zdM\n",
+         NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+         NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+         LargestSize >> 20);
+}
+
+} // namespace scudo
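The CommitBase computation for alignments of at least a page can be verified with plain arithmetic. The example below uses a made-up MapBase (it would really come from map()) and a 64 KiB alignment hint, and checks the two properties the code relies on: the address one page past CommitBase is aligned as requested, and the leading guard page in front of CommitBase still lies within the mapping.

#include <cassert>
#include <cstdint>

static uint64_t roundUpToMultiple(uint64_t X, uint64_t Boundary) {
  return (X + Boundary - 1) / Boundary * Boundary;
}

int main() {
  const uint64_t PageSize = 0x1000;
  const uint64_t AlignmentHint = 0x10000;   // 64 KiB requested alignment
  const uint64_t MapBase = 0x7f1234561000;  // pretend result, page-aligned
  const uint64_t CommitBase =
      roundUpToMultiple(MapBase + PageSize + 1, AlignmentHint) - PageSize;
  const uint64_t UserAddr = CommitBase + PageSize;
  assert(UserAddr % AlignmentHint == 0);    // user pointer is aligned
  assert(CommitBase - PageSize >= MapBase); // guard page fits in front
  return 0;
}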
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
new file mode 100644
index 0000000..9124e2a
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
@@ -0,0 +1,97 @@
+//===-- secondary.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SECONDARY_H_
+#define SCUDO_SECONDARY_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "stats.h"
+
+namespace scudo {
+
+// This allocator wraps the platform allocation primitives, and as such is on
+// the slower side and should preferably be used for larger sized allocations.
+// Blocks allocated will be preceded and followed by a guard page, and hold
+// their own header that is not checksummed: the guard pages and the Combined
+// header should be enough for our purpose.
+
+namespace LargeBlock {
+
+struct Header {
+  LargeBlock::Header *Prev;
+  LargeBlock::Header *Next;
+  uptr BlockEnd;
+  uptr MapBase;
+  uptr MapSize;
+  MapPlatformData Data;
+};
+
+constexpr uptr getHeaderSize() {
+  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+static Header *getHeader(uptr Ptr) {
+  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+
+static Header *getHeader(const void *Ptr) {
+  return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+
+} // namespace LargeBlock
+
+class MapAllocator {
+public:
+  void initLinkerInitialized(GlobalStats *S) {
+    Stats.initLinkerInitialized();
+    if (S)
+      S->link(&Stats);
+  }
+  void init(GlobalStats *S) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(S);
+  }
+
+  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+
+  void deallocate(void *Ptr);
+
+  static uptr getBlockEnd(void *Ptr) {
+    return LargeBlock::getHeader(Ptr)->BlockEnd;
+  }
+
+  static uptr getBlockSize(void *Ptr) {
+    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
+  }
+
+  void printStats() const;
+
+  void disable() { Mutex.lock(); }
+
+  void enable() { Mutex.unlock(); }
+
+  template <typename F> void iterateOverBlocks(F Callback) const {
+    for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
+      Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+  }
+
+private:
+  HybridMutex Mutex;
+  LargeBlock::Header *Tail;
+  uptr AllocatedBytes;
+  uptr FreedBytes;
+  uptr LargestSize;
+  u32 NumberOfAllocs;
+  u32 NumberOfFrees;
+  LocalStats Stats;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_SECONDARY_H_
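As a rough illustration of the MapAllocator interface declared above (a hypothetical caller, not part of the imported sources), the secondary can be exercised like this:

    #include "secondary.h"
    #include <string.h>

    static scudo::MapAllocator Secondary;

    void secondaryRoundTrip() {
      Secondary.init(/*S=*/nullptr);  // No GlobalStats linked in this sketch.
      scudo::uptr BlockEnd;
      // Sizes routed to the secondary are typically beyond the primary's range.
      void *P = Secondary.allocate(1 << 20, /*AlignmentHint=*/0, &BlockEnd);
      if (P) {
        // The usable region runs from P up to BlockEnd.
        memset(P, 0xab, scudo::MapAllocator::getBlockSize(P));
        Secondary.deallocate(P);
      }
      Secondary.printStats();
    }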
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
new file mode 100644
index 0000000..b7df54c
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -0,0 +1,149 @@
+//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SIZE_CLASS_MAP_H_
+#define SCUDO_SIZE_CLASS_MAP_H_
+
+#include "common.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassMap maps allocation sizes into size classes and back, in an
+// efficient table-free manner.
+//
+// Class 0 is a special class that doesn't abide by the same rules as other
+// classes. The allocator uses it to hold batches.
+//
+// The other sizes are controlled by the template parameters:
+// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
+// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
+// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
+//               2^MidSizeLog bytes.
+// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
+//            e.g. with NumBits==3, all size classes after 2^MidSizeLog look like
+//            0b1xx0..0 (where x is either 0 or 1).
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that can be cached per thread:
+// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
+// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
+
+template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
+          u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
+class SizeClassMap {
+  static const uptr MinSize = 1UL << MinSizeLog;
+  static const uptr MidSize = 1UL << MidSizeLog;
+  static const uptr MidClass = MidSize / MinSize;
+  static const u8 S = NumBits - 1;
+  static const uptr M = (1UL << S) - 1;
+
+public:
+  static const u32 MaxNumCachedHint = MaxNumCachedHintT;
+
+  static const uptr MaxSize = 1UL << MaxSizeLog;
+  static const uptr NumClasses =
+      MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
+  COMPILER_CHECK(NumClasses <= 256);
+  static const uptr LargestClassId = NumClasses - 1;
+  static const uptr BatchClassId = 0;
+
+  static uptr getSizeByClassId(uptr ClassId) {
+    DCHECK_NE(ClassId, BatchClassId);
+    if (ClassId <= MidClass)
+      return ClassId << MinSizeLog;
+    ClassId -= MidClass;
+    const uptr T = MidSize << (ClassId >> S);
+    return T + (T >> S) * (ClassId & M);
+  }
+
+  static uptr getClassIdBySize(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    if (Size <= MidSize)
+      return (Size + MinSize - 1) >> MinSizeLog;
+    const uptr L = getMostSignificantSetBitIndex(Size);
+    const uptr HBits = (Size >> (L - S)) & M;
+    const uptr LBits = Size & ((1UL << (L - S)) - 1);
+    const uptr L1 = L - MidSizeLog;
+    return MidClass + (L1 << S) + HBits + (LBits > 0);
+  }
+
+  static u32 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    DCHECK_NE(Size, 0);
+    u32 N;
+    // Force a 32-bit division if the template parameters allow for it.
+    if (MaxBytesCachedLog > 31 || MaxSizeLog > 31)
+      N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size);
+    else
+      N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size);
+    return Max(1U, Min(MaxNumCachedHint, N));
+  }
+
+  static void print() {
+    uptr PrevS = 0;
+    uptr TotalCached = 0;
+    for (uptr I = 0; I < NumClasses; I++) {
+      if (I == BatchClassId)
+        continue;
+      const uptr S = getSizeByClassId(I);
+      if (S >= MidSize / 2 && (S & (S - 1)) == 0)
+        Printf("\n");
+      const uptr D = S - PrevS;
+      const uptr P = PrevS ? (D * 100 / PrevS) : 0;
+      const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
+      const uptr Cached = getMaxCachedHint(S) * S;
+      Printf(
+          "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
+          I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
+          getClassIdBySize(S));
+      TotalCached += Cached;
+      PrevS = S;
+    }
+    Printf("Total Cached: %zu\n", TotalCached);
+  }
+
+  static void validate() {
+    for (uptr C = 0; C < NumClasses; C++) {
+      if (C == BatchClassId)
+        continue;
+      const uptr S = getSizeByClassId(C);
+      CHECK_NE(S, 0U);
+      CHECK_EQ(getClassIdBySize(S), C);
+      if (C < LargestClassId)
+        CHECK_EQ(getClassIdBySize(S + 1), C + 1);
+      CHECK_EQ(getClassIdBySize(S - 1), C);
+      CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
+    }
+    // Do not perform the loop if the maximum size is too large.
+    if (MaxSizeLog > 19)
+      return;
+    for (uptr S = 1; S <= MaxSize; S++) {
+      const uptr C = getClassIdBySize(S);
+      CHECK_LT(C, NumClasses);
+      CHECK_GE(getSizeByClassId(C), S);
+      if (C > 0)
+        CHECK_LT(getSizeByClassId(C - 1), S);
+    }
+  }
+};
+
+typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
+
+// TODO(kostyak): further tune class maps for Android & Fuchsia.
+#if SCUDO_WORDSIZE == 64U
+typedef SizeClassMap<3, 5, 8, 15, 8, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 16, 14, 12> AndroidSizeClassMap;
+#else
+typedef SizeClassMap<3, 4, 7, 15, 8, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 4, 7, 16, 14, 12> AndroidSizeClassMap;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_SIZE_CLASS_MAP_H_
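To make the mapping above concrete, here is a hypothetical sanity check (not part of the imported sources) that mirrors what validate() asserts for DefaultSizeClassMap:

    #include "size_class_map.h"
    #include <assert.h>

    void sizeClassMapRoundTrip() {
      using Map = scudo::DefaultSizeClassMap;
      // With MinSizeLog == 5, the first regular class serves 32-byte chunks.
      assert(Map::getSizeByClassId(1) == 32);
      // Every size up to MaxSize maps to a class at least as large as itself.
      for (scudo::uptr Size = 1; Size <= Map::MaxSize; Size++) {
        const scudo::uptr C = Map::getClassIdBySize(Size);
        assert(C < Map::NumClasses);
        assert(Map::getSizeByClassId(C) >= Size);
      }
    }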
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/stats.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
new file mode 100644
index 0000000..1243675
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
@@ -0,0 +1,105 @@
+//===-- stats.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STATS_H_
+#define SCUDO_STATS_H_
+
+#include "atomic_helpers.h"
+#include "mutex.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// Memory allocator statistics
+enum StatType { StatAllocated, StatMapped, StatCount };
+
+typedef uptr StatCounters[StatCount];
+
+// Per-thread stats, living in the per-thread cache. We use atomics so that
+// the numbers themselves are consistent. But we don't use atomic_{add|sub} or
+// a lock, because those are expensive operations, and we only need the stats
+// to be "somewhat" correct: e.g. if we call GlobalStats::get while a thread
+// is LocalStats::add'ing, this is OK, we will still get a meaningful number.
+class LocalStats {
+public:
+  void initLinkerInitialized() {}
+  void init() { memset(this, 0, sizeof(*this)); }
+
+  void add(StatType I, uptr V) {
+    V += atomic_load_relaxed(&StatsArray[I]);
+    atomic_store_relaxed(&StatsArray[I], V);
+  }
+
+  void sub(StatType I, uptr V) {
+    V = atomic_load_relaxed(&StatsArray[I]) - V;
+    atomic_store_relaxed(&StatsArray[I], V);
+  }
+
+  void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }
+
+  uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
+
+private:
+  friend class GlobalStats;
+  atomic_uptr StatsArray[StatCount];
+  LocalStats *Next;
+  LocalStats *Prev;
+};
+
+// Global stats, used for aggregation and querying.
+class GlobalStats : public LocalStats {
+public:
+  void initLinkerInitialized() {
+    Next = this;
+    Prev = this;
+  }
+  void init() {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized();
+  }
+
+  void link(LocalStats *S) {
+    ScopedLock L(Mutex);
+    S->Next = Next;
+    S->Prev = this;
+    Next->Prev = S;
+    Next = S;
+  }
+
+  void unlink(LocalStats *S) {
+    ScopedLock L(Mutex);
+    S->Prev->Next = S->Next;
+    S->Next->Prev = S->Prev;
+    for (uptr I = 0; I < StatCount; I++)
+      add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
+  }
+
+  void get(uptr *S) const {
+    memset(S, 0, StatCount * sizeof(uptr));
+    ScopedLock L(Mutex);
+    const LocalStats *Stats = this;
+    for (;;) {
+      for (uptr I = 0; I < StatCount; I++)
+        S[I] += Stats->get(static_cast<StatType>(I));
+      Stats = Stats->Next;
+      if (Stats == this)
+        break;
+    }
+    // All stats must be non-negative.
+    for (uptr I = 0; I < StatCount; I++)
+      S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
+  }
+
+private:
+  mutable HybridMutex Mutex;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_STATS_H_
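For reference, a minimal sketch (hypothetical, not part of the imported sources) of how LocalStats instances feed into GlobalStats:

    #include "stats.h"

    void statsAggregation() {
      scudo::GlobalStats Global;
      Global.init();
      scudo::LocalStats Local;
      Local.init();
      Global.link(&Local);  // Per-thread stats join the global linked list.
      Local.add(scudo::StatAllocated, 4096U);
      scudo::uptr Counters[scudo::StatCount];
      Global.get(Counters);  // Sums over every linked LocalStats instance.
      // Counters[scudo::StatAllocated] is now 4096.
      Global.unlink(&Local);  // Folds the local counters back into Global.
    }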
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cc
new file mode 100644
index 0000000..f0068af
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cc
@@ -0,0 +1,236 @@
+//===-- string_utils.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "string_utils.h"
+#include "common.h"
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <string.h>
+
+namespace scudo {
+
+static int appendChar(char **Buffer, const char *BufferEnd, char C) {
+  if (*Buffer < BufferEnd) {
+    **Buffer = C;
+    (*Buffer)++;
+  }
+  return 1;
+}
+
+// Appends a number in a given Base to the buffer. If its length is less than
+// |MinNumberLength|, it is padded with leading zeroes or spaces, depending on
+// the value of |PadWithZero|.
+static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
+                        u8 Base, u8 MinNumberLength, bool PadWithZero,
+                        bool Negative, bool Upper) {
+  constexpr uptr MaxLen = 30;
+  RAW_CHECK(Base == 10 || Base == 16);
+  RAW_CHECK(Base == 10 || !Negative);
+  RAW_CHECK(AbsoluteValue || !Negative);
+  RAW_CHECK(MinNumberLength < MaxLen);
+  int Res = 0;
+  if (Negative && MinNumberLength)
+    --MinNumberLength;
+  if (Negative && PadWithZero)
+    Res += appendChar(Buffer, BufferEnd, '-');
+  uptr NumBuffer[MaxLen];
+  int Pos = 0;
+  do {
+    RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
+                  "appendNumber buffer overflow");
+    NumBuffer[Pos++] = AbsoluteValue % Base;
+    AbsoluteValue /= Base;
+  } while (AbsoluteValue > 0);
+  if (Pos < MinNumberLength) {
+    memset(&NumBuffer[Pos], 0,
+           sizeof(NumBuffer[0]) * static_cast<uptr>(MinNumberLength - Pos));
+    Pos = MinNumberLength;
+  }
+  RAW_CHECK(Pos > 0);
+  Pos--;
+  for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
+    char c = (PadWithZero || Pos == 0) ? '0' : ' ';
+    Res += appendChar(Buffer, BufferEnd, c);
+  }
+  if (Negative && !PadWithZero)
+    Res += appendChar(Buffer, BufferEnd, '-');
+  for (; Pos >= 0; Pos--) {
+    char Digit = static_cast<char>(NumBuffer[Pos]);
+    Digit = static_cast<char>((Digit < 10) ? '0' + Digit
+                                           : (Upper ? 'A' : 'a') + Digit - 10);
+    Res += appendChar(Buffer, BufferEnd, Digit);
+  }
+  return Res;
+}
+
+static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
+                          u8 Base, u8 MinNumberLength, bool PadWithZero,
+                          bool Upper) {
+  return appendNumber(Buffer, BufferEnd, Num, Base, MinNumberLength,
+                      PadWithZero, /*Negative=*/false, Upper);
+}
+
+static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
+                               u8 MinNumberLength, bool PadWithZero) {
+  const bool Negative = (Num < 0);
+  return appendNumber(Buffer, BufferEnd,
+                      static_cast<u64>(Negative ? -Num : Num), 10,
+                      MinNumberLength, PadWithZero, Negative,
+                      /*Upper=*/false);
+}
+
+// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
+// interpret Width == 0 as "no Width requested":
+// Width == 0 - no Width requested
+// Width  < 0 - left-justify S within and pad it to -Width chars, if necessary
+// Width  > 0 - right-justify S, not implemented yet
+static int appendString(char **Buffer, const char *BufferEnd, int Width,
+                        int MaxChars, const char *S) {
+  if (!S)
+    S = "<null>";
+  int Res = 0;
+  for (; *S; S++) {
+    if (MaxChars >= 0 && Res >= MaxChars)
+      break;
+    Res += appendChar(Buffer, BufferEnd, *S);
+  }
+  // Only left-justified strings are supported.
+  while (Width < -Res)
+    Res += appendChar(Buffer, BufferEnd, ' ');
+  return Res;
+}
+
+static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
+  int Res = 0;
+  Res += appendString(Buffer, BufferEnd, 0, -1, "0x");
+  Res += appendUnsigned(Buffer, BufferEnd, ptr_value, 16,
+                        SCUDO_POINTER_FORMAT_LENGTH, /*PadWithZero=*/true,
+                        /*Upper=*/false);
+  return Res;
+}
+
+int formatString(char *Buffer, uptr BufferLength, const char *Format,
+                 va_list Args) {
+  UNUSED static const char *PrintfFormatsHelp =
+      "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+      "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+  RAW_CHECK(Format);
+  RAW_CHECK(BufferLength > 0);
+  const char *BufferEnd = &Buffer[BufferLength - 1];
+  const char *Cur = Format;
+  int Res = 0;
+  for (; *Cur; Cur++) {
+    if (*Cur != '%') {
+      Res += appendChar(&Buffer, BufferEnd, *Cur);
+      continue;
+    }
+    Cur++;
+    const bool LeftJustified = *Cur == '-';
+    if (LeftJustified)
+      Cur++;
+    bool HaveWidth = (*Cur >= '0' && *Cur <= '9');
+    const bool PadWithZero = (*Cur == '0');
+    u8 Width = 0;
+    if (HaveWidth) {
+      while (*Cur >= '0' && *Cur <= '9')
+        Width = static_cast<u8>(Width * 10 + *Cur++ - '0');
+    }
+    const bool HavePrecision = (Cur[0] == '.' && Cur[1] == '*');
+    int Precision = -1;
+    if (HavePrecision) {
+      Cur += 2;
+      Precision = va_arg(Args, int);
+    }
+    const bool HaveZ = (*Cur == 'z');
+    Cur += HaveZ;
+    const bool HaveLL = !HaveZ && (Cur[0] == 'l' && Cur[1] == 'l');
+    Cur += HaveLL * 2;
+    s64 DVal;
+    u64 UVal;
+    const bool HaveLength = HaveZ || HaveLL;
+    const bool HaveFlags = HaveWidth || HaveLength;
+    // At the moment only %s supports precision and left-justification.
+    CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
+    switch (*Cur) {
+    case 'd': {
+      DVal = HaveLL ? va_arg(Args, s64)
+                    : HaveZ ? va_arg(Args, sptr) : va_arg(Args, int);
+      Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+      break;
+    }
+    case 'u':
+    case 'x':
+    case 'X': {
+      UVal = HaveLL ? va_arg(Args, u64)
+                    : HaveZ ? va_arg(Args, uptr) : va_arg(Args, unsigned);
+      const bool Upper = (*Cur == 'X');
+      Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
+                            Width, PadWithZero, Upper);
+      break;
+    }
+    case 'p': {
+      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+      Res += appendPointer(&Buffer, BufferEnd, va_arg(Args, uptr));
+      break;
+    }
+    case 's': {
+      RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
+      // Only left-justified Width is supported.
+      CHECK(!HaveWidth || LeftJustified);
+      Res += appendString(&Buffer, BufferEnd, LeftJustified ? -Width : Width,
+                          Precision, va_arg(Args, char *));
+      break;
+    }
+    case 'c': {
+      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+      Res +=
+          appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
+      break;
+    }
+    case '%': {
+      RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+      Res += appendChar(&Buffer, BufferEnd, '%');
+      break;
+    }
+    default: {
+      RAW_CHECK_MSG(false, PrintfFormatsHelp);
+    }
+    }
+  }
+  RAW_CHECK(Buffer <= BufferEnd);
+  appendChar(&Buffer, BufferEnd + 1, '\0');
+  return Res;
+}
+
+void ScopedString::append(const char *Format, va_list Args) {
+  CHECK_LT(Length, String.size());
+  formatString(String.data() + Length, String.size() - Length, Format, Args);
+  Length += strlen(String.data() + Length);
+  CHECK_LT(Length, String.size());
+}
+
+FORMAT(2, 3)
+void ScopedString::append(const char *Format, ...) {
+  va_list Args;
+  va_start(Args, Format);
+  append(Format, Args);
+  va_end(Args);
+}
+
+FORMAT(1, 2)
+void Printf(const char *Format, ...) {
+  va_list Args;
+  va_start(Args, Format);
+  ScopedString Msg(512);
+  Msg.append(Format, Args);
+  outputRaw(Msg.data());
+  va_end(Args);
+}
+
+} // namespace scudo
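A short usage sketch (hypothetical, not part of the imported sources) covering the format specifiers formatString supports:

    #include "string_utils.h"

    void formatExamples() {
      scudo::ScopedString Str(128);
      // %zu takes a uptr, %p prints a zero-padded pointer, %s a C string.
      Str.append("%zu bytes at %p [%s]\n", static_cast<scudo::uptr>(64),
                 reinterpret_cast<void *>(0x1000), "tag");
      // Left-justified width for strings, zero-padded width for integers.
      Str.append("%-8s 0x%08x\n", "scudo", 0xdeadU);
      scudo::Printf("%s", Str.data());
    }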
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
new file mode 100644
index 0000000..aea7b3f
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
@@ -0,0 +1,42 @@
+//===-- string_utils.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STRING_UTILS_H_
+#define SCUDO_STRING_UTILS_H_
+
+#include "internal_defs.h"
+#include "vector.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedString {
+public:
+  explicit ScopedString(uptr MaxLength) : String(MaxLength), Length(0) {
+    String[0] = '\0';
+  }
+  uptr length() { return Length; }
+  const char *data() { return String.data(); }
+  void clear() {
+    String[0] = '\0';
+    Length = 0;
+  }
+  void append(const char *Format, va_list Args);
+  void append(const char *Format, ...);
+
+private:
+  Vector<char> String;
+  uptr Length;
+};
+
+void Printf(const char *Format, ...);
+
+} // namespace scudo
+
+#endif // SCUDO_STRING_UTILS_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
new file mode 100644
index 0000000..039ad48
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
@@ -0,0 +1,98 @@
+include_directories(..)
+
+add_custom_target(ScudoUnitTests)
+set_target_properties(ScudoUnitTests PROPERTIES
+  FOLDER "Compiler-RT Tests")
+
+set(SCUDO_UNITTEST_CFLAGS
+  ${COMPILER_RT_UNITTEST_CFLAGS}
+  ${COMPILER_RT_GTEST_CFLAGS}
+  -I${COMPILER_RT_SOURCE_DIR}/include
+  -I${COMPILER_RT_SOURCE_DIR}/lib
+  -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone
+  -DGTEST_HAS_RTTI=0
+  # Extra flags for the C++ tests
+  # TODO(kostyak): find a way to make -fsized-deallocation work
+  -Wno-mismatched-new-delete)
+
+set(SCUDO_TEST_ARCH ${SCUDO_STANDALONE_SUPPORTED_ARCH})
+
+# gtest requires C++
+set(LINK_FLAGS ${COMPILER_RT_UNITTEST_LINK_FLAGS})
+foreach(lib ${SANITIZER_TEST_CXX_LIBRARIES})
+  list(APPEND LINK_FLAGS -l${lib})
+endforeach()
+list(APPEND LINK_FLAGS -pthread)
+# Linking against libatomic is required with some compilers
+list(APPEND LINK_FLAGS -latomic)
+
+set(SCUDO_TEST_HEADERS)
+foreach (header ${SCUDO_HEADERS})
+  list(APPEND SCUDO_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
+endforeach()
+
+macro(add_scudo_unittest testname)
+  cmake_parse_arguments(TEST "" "" "SOURCES;ADDITIONAL_RTOBJECTS" ${ARGN})
+  if(COMPILER_RT_HAS_SCUDO_STANDALONE)
+    foreach(arch ${SCUDO_TEST_ARCH})
+      # Additional runtime objects get added alongside RTScudoStandalone
+      set(SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
+      foreach(rtobject ${TEST_ADDITIONAL_RTOBJECTS})
+        list(APPEND SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:${rtobject}.${arch}>)
+      endforeach()
+      # Add the static runtime library made of all the runtime objects
+      set(RUNTIME RT${testname}.${arch})
+      add_library(${RUNTIME} STATIC ${SCUDO_TEST_RTOBJECTS})
+      set(ScudoUnitTestsObjects)
+      generate_compiler_rt_tests(ScudoUnitTestsObjects ScudoUnitTests
+        "${testname}-${arch}-Test" ${arch}
+        SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
+        COMPILE_DEPS ${SCUDO_TEST_HEADERS}
+        DEPS gtest scudo_standalone
+        RUNTIME ${RUNTIME}
+        CFLAGS ${SCUDO_UNITTEST_CFLAGS}
+        LINK_FLAGS ${LINK_FLAGS})
+    endforeach()
+  endif()
+endmacro()
+
+set(SCUDO_UNIT_TEST_SOURCES
+  atomic_test.cc
+  bytemap_test.cc
+  checksum_test.cc
+  chunk_test.cc
+  combined_test.cc
+  flags_test.cc
+  list_test.cc
+  map_test.cc
+  mutex_test.cc
+  primary_test.cc
+  quarantine_test.cc
+  release_test.cc
+  report_test.cc
+  secondary_test.cc
+  size_class_map_test.cc
+  stats_test.cc
+  strings_test.cc
+  tsd_test.cc
+  vector_test.cc
+  scudo_unit_test_main.cc)
+
+add_scudo_unittest(ScudoUnitTest
+  SOURCES ${SCUDO_UNIT_TEST_SOURCES})
+
+set(SCUDO_C_UNIT_TEST_SOURCES
+  wrappers_c_test.cc
+  scudo_unit_test_main.cc)
+
+add_scudo_unittest(ScudoCUnitTest
+  SOURCES ${SCUDO_C_UNIT_TEST_SOURCES}
+  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers)
+
+set(SCUDO_CXX_UNIT_TEST_SOURCES
+  wrappers_cpp_test.cc
+  scudo_unit_test_main.cc)
+
+add_scudo_unittest(ScudoCxxUnitTest
+  SOURCES ${SCUDO_CXX_UNIT_TEST_SOURCES}
+  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers RTScudoStandaloneCxxWrappers)
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/atomic_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/atomic_test.cc
new file mode 100644
index 0000000..3095451
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/atomic_test.cc
@@ -0,0 +1,112 @@
+//===-- atomic_test.cc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "scudo/standalone/atomic_helpers.h"
+#include "gtest/gtest.h"
+
+namespace scudo {
+
+template <typename T> struct ValAndMagic {
+  typename T::Type Magic0;
+  T A;
+  typename T::Type Magic1;
+
+  static ValAndMagic<T> *Sink;
+};
+
+template <typename T> ValAndMagic<T> *ValAndMagic<T>::Sink;
+
+template <typename T, memory_order LoadMO, memory_order StoreMO>
+void checkStoreLoad() {
+  typedef typename T::Type Type;
+  ValAndMagic<T> Val;
+  // Prevent the compiler from scalarizing the struct.
+  ValAndMagic<T>::Sink = &Val;
+  // Ensure that surrounding memory is not overwritten.
+  Val.Magic0 = Val.Magic1 = (Type)-3;
+  for (u64 I = 0; I < 100; I++) {
+    // Generate a value that occupies all bytes of the variable.
+    u64 V = I;
+    V |= V << 8;
+    V |= V << 16;
+    V |= V << 32;
+    Val.A.ValDoNotUse = (Type)V;
+    EXPECT_EQ(atomic_load(&Val.A, LoadMO), (Type)V);
+    Val.A.ValDoNotUse = (Type)-1;
+    atomic_store(&Val.A, (Type)V, StoreMO);
+    EXPECT_EQ(Val.A.ValDoNotUse, (Type)V);
+  }
+  EXPECT_EQ(Val.Magic0, (Type)-3);
+  EXPECT_EQ(Val.Magic1, (Type)-3);
+}
+
+TEST(ScudoAtomicTest, AtomicStoreLoad) {
+  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_relaxed>();
+  checkStoreLoad<atomic_u8, memory_order_consume, memory_order_relaxed>();
+  checkStoreLoad<atomic_u8, memory_order_acquire, memory_order_relaxed>();
+  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_release>();
+  checkStoreLoad<atomic_u8, memory_order_seq_cst, memory_order_seq_cst>();
+
+  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_relaxed>();
+  checkStoreLoad<atomic_u16, memory_order_consume, memory_order_relaxed>();
+  checkStoreLoad<atomic_u16, memory_order_acquire, memory_order_relaxed>();
+  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_release>();
+  checkStoreLoad<atomic_u16, memory_order_seq_cst, memory_order_seq_cst>();
+
+  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_relaxed>();
+  checkStoreLoad<atomic_u32, memory_order_consume, memory_order_relaxed>();
+  checkStoreLoad<atomic_u32, memory_order_acquire, memory_order_relaxed>();
+  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_release>();
+  checkStoreLoad<atomic_u32, memory_order_seq_cst, memory_order_seq_cst>();
+
+  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_relaxed>();
+  checkStoreLoad<atomic_u64, memory_order_consume, memory_order_relaxed>();
+  checkStoreLoad<atomic_u64, memory_order_acquire, memory_order_relaxed>();
+  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_release>();
+  checkStoreLoad<atomic_u64, memory_order_seq_cst, memory_order_seq_cst>();
+
+  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_relaxed>();
+  checkStoreLoad<atomic_uptr, memory_order_consume, memory_order_relaxed>();
+  checkStoreLoad<atomic_uptr, memory_order_acquire, memory_order_relaxed>();
+  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_release>();
+  checkStoreLoad<atomic_uptr, memory_order_seq_cst, memory_order_seq_cst>();
+}
+
+template <typename T> void checkAtomicCompareExchange() {
+  typedef typename T::Type Type;
+  {
+    Type OldVal = 42;
+    Type NewVal = 24;
+    Type V = OldVal;
+    EXPECT_TRUE(atomic_compare_exchange_strong(
+        reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
+    EXPECT_FALSE(atomic_compare_exchange_strong(
+        reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
+    EXPECT_EQ(NewVal, OldVal);
+  }
+  {
+    Type OldVal = 42;
+    Type NewVal = 24;
+    Type V = OldVal;
+    EXPECT_TRUE(atomic_compare_exchange_weak(reinterpret_cast<T *>(&V), &OldVal,
+                                             NewVal, memory_order_relaxed));
+    EXPECT_FALSE(atomic_compare_exchange_weak(
+        reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
+    EXPECT_EQ(NewVal, OldVal);
+  }
+}
+
+TEST(ScudoAtomicTest, AtomicCompareExchangeTest) {
+  checkAtomicCompareExchange<atomic_u8>();
+  checkAtomicCompareExchange<atomic_u16>();
+  checkAtomicCompareExchange<atomic_u32>();
+  checkAtomicCompareExchange<atomic_u64>();
+  checkAtomicCompareExchange<atomic_uptr>();
+}
+
+} // namespace scudo
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/bytemap_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/bytemap_test.cc
new file mode 100644
index 0000000..ea34dc0
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/bytemap_test.cc
@@ -0,0 +1,74 @@
+//===-- bytemap_test.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "bytemap.h"
+
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+template <typename T> void testMap(T &Map, scudo::uptr Size) {
+  Map.init();
+  for (scudo::uptr I = 0; I < Size; I += 7)
+    Map.set(I, (I % 100) + 1);
+  for (scudo::uptr J = 0; J < Size; J++) {
+    if (J % 7)
+      EXPECT_EQ(Map[J], 0);
+    else
+      EXPECT_EQ(Map[J], (J % 100) + 1);
+  }
+}
+
+TEST(ScudoByteMapTest, FlatByteMap) {
+  const scudo::uptr Size = 1U << 10;
+  scudo::FlatByteMap<Size> Map;
+  testMap(Map, Size);
+  Map.unmapTestOnly();
+}
+
+TEST(ScudoByteMapTest, TwoLevelByteMap) {
+  const scudo::uptr Size1 = 1U << 6, Size2 = 1U << 12;
+  scudo::TwoLevelByteMap<Size1, Size2> Map;
+  testMap(Map, Size1 * Size2);
+  Map.unmapTestOnly();
+}
+
+using TestByteMap = scudo::TwoLevelByteMap<1U << 12, 1U << 13>;
+
+struct TestByteMapParam {
+  TestByteMap *Map;
+  scudo::uptr Shard;
+  scudo::uptr NumberOfShards;
+};
+
+void *populateByteMap(void *Param) {
+  TestByteMapParam *P = reinterpret_cast<TestByteMapParam *>(Param);
+  for (scudo::uptr I = P->Shard; I < P->Map->size(); I += P->NumberOfShards) {
+    scudo::u8 V = static_cast<scudo::u8>((I % 100) + 1);
+    P->Map->set(I, V);
+    EXPECT_EQ((*P->Map)[I], V);
+  }
+  return 0;
+}
+
+TEST(ScudoByteMapTest, ThreadedTwoLevelByteMap) {
+  TestByteMap Map;
+  Map.init();
+  static const scudo::uptr NumberOfThreads = 16U;
+  pthread_t T[NumberOfThreads];
+  TestByteMapParam P[NumberOfThreads];
+  for (scudo::uptr I = 0; I < NumberOfThreads; I++) {
+    P[I].Map = &Map;
+    P[I].Shard = I;
+    P[I].NumberOfShards = NumberOfThreads;
+    pthread_create(&T[I], 0, populateByteMap, &P[I]);
+  }
+  for (scudo::uptr I = 0; I < NumberOfThreads; I++)
+    pthread_join(T[I], 0);
+  Map.unmapTestOnly();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/checksum_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/checksum_test.cc
new file mode 100644
index 0000000..2e8dc8a
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/checksum_test.cc
@@ -0,0 +1,58 @@
+//===-- checksum_test.cc ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+scudo::u16 computeSoftwareChecksum(scudo::u32 Seed, scudo::uptr *Array,
+                                   scudo::uptr ArraySize) {
+  scudo::u16 Checksum = static_cast<scudo::u16>(Seed & 0xffff);
+  for (scudo::uptr I = 0; I < ArraySize; I++)
+    Checksum = scudo::computeBSDChecksum(Checksum, Array[I]);
+  return Checksum;
+}
+
+scudo::u16 computeHardwareChecksum(scudo::u32 Seed, scudo::uptr *Array,
+                                   scudo::uptr ArraySize) {
+  scudo::u32 Crc = Seed;
+  for (scudo::uptr I = 0; I < ArraySize; I++)
+    Crc = scudo::computeHardwareCRC32(Crc, Array[I]);
+  return static_cast<scudo::u16>((Crc & 0xffff) ^ (Crc >> 16));
+}
+
+typedef scudo::u16 (*ComputeChecksum)(scudo::u32, scudo::uptr *, scudo::uptr);
+
+// This verifies that flipping bits in the data being checksummed produces a
+// different checksum. We do not use random data to avoid flakiness.
+template <ComputeChecksum F> void verifyChecksumFunctionBitFlip() {
+  scudo::uptr Array[sizeof(scudo::u64) / sizeof(scudo::uptr)];
+  const scudo::uptr ArraySize = ARRAY_SIZE(Array);
+  memset(Array, 0xaa, sizeof(Array));
+  const scudo::u32 Seed = 0x41424343U;
+  const scudo::u16 Reference = F(Seed, Array, ArraySize);
+  scudo::u8 IdenticalChecksums = 0;
+  for (scudo::uptr I = 0; I < ArraySize; I++) {
+    for (scudo::uptr J = 0; J < SCUDO_WORDSIZE; J++) {
+      Array[I] ^= 1U << J;
+      if (F(Seed, Array, ArraySize) == Reference)
+        IdenticalChecksums++;
+      Array[I] ^= 1U << J;
+    }
+  }
+  // Allow for a couple of identical checksums over the whole set of flips.
+  EXPECT_LE(IdenticalChecksums, 2);
+}
+
+TEST(ScudoChecksumTest, ChecksumFunctions) {
+  verifyChecksumFunctionBitFlip<computeSoftwareChecksum>();
+  if (&scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32())
+    verifyChecksumFunctionBitFlip<computeHardwareChecksum>();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/chunk_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/chunk_test.cc
new file mode 100644
index 0000000..c3a21e4
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/chunk_test.cc
@@ -0,0 +1,80 @@
+//===-- chunk_test.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "chunk.h"
+
+#include "gtest/gtest.h"
+
+#include <stdlib.h>
+
+static constexpr scudo::uptr HeaderSize = scudo::Chunk::getHeaderSize();
+static constexpr scudo::u32 Cookie = 0x41424344U;
+static constexpr scudo::u32 InvalidCookie = 0x11223344U;
+
+static void initChecksum(void) {
+  if (&scudo::computeHardwareCRC32 && scudo::hasHardwareCRC32())
+    scudo::HashAlgorithm = scudo::Checksum::HardwareCRC32;
+}
+
+TEST(ScudoChunkTest, ChunkBasic) {
+  initChecksum();
+  const scudo::uptr Size = 0x100U;
+  scudo::Chunk::UnpackedHeader Header = {};
+  void *Block = malloc(HeaderSize + Size);
+  void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
+                                     HeaderSize);
+  scudo::Chunk::storeHeader(Cookie, P, &Header);
+  memset(P, 'A', Size);
+  scudo::Chunk::loadHeader(Cookie, P, &Header);
+  EXPECT_TRUE(scudo::Chunk::isValid(Cookie, P, &Header));
+  EXPECT_FALSE(scudo::Chunk::isValid(InvalidCookie, P, &Header));
+  EXPECT_DEATH(scudo::Chunk::loadHeader(InvalidCookie, P, &Header), "");
+  free(Block);
+}
+
+TEST(ScudoChunkTest, ChunkCmpXchg) {
+  initChecksum();
+  const scudo::uptr Size = 0x100U;
+  scudo::Chunk::UnpackedHeader OldHeader = {};
+  OldHeader.Origin = scudo::Chunk::Origin::Malloc;
+  OldHeader.ClassId = 0x42U;
+  OldHeader.SizeOrUnusedBytes = Size;
+  OldHeader.State = scudo::Chunk::State::Allocated;
+  void *Block = malloc(HeaderSize + Size);
+  void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
+                                     HeaderSize);
+  scudo::Chunk::storeHeader(Cookie, P, &OldHeader);
+  memset(P, 'A', Size);
+  scudo::Chunk::UnpackedHeader NewHeader = OldHeader;
+  NewHeader.State = scudo::Chunk::State::Quarantined;
+  scudo::Chunk::compareExchangeHeader(Cookie, P, &NewHeader, &OldHeader);
+  NewHeader = {};
+  EXPECT_TRUE(scudo::Chunk::isValid(Cookie, P, &NewHeader));
+  EXPECT_EQ(NewHeader.State, scudo::Chunk::State::Quarantined);
+  EXPECT_FALSE(scudo::Chunk::isValid(InvalidCookie, P, &NewHeader));
+  free(Block);
+}
+
+TEST(ScudoChunkTest, CorruptHeader) {
+  initChecksum();
+  const scudo::uptr Size = 0x100U;
+  scudo::Chunk::UnpackedHeader Header = {};
+  void *Block = malloc(HeaderSize + Size);
+  void *P = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Block) +
+                                     HeaderSize);
+  scudo::Chunk::storeHeader(Cookie, P, &Header);
+  memset(P, 'A', Size);
+  scudo::Chunk::loadHeader(Cookie, P, &Header);
+  // Simulate a couple of corrupted bits per byte of header data.
+  for (scudo::uptr I = 0; I < sizeof(scudo::Chunk::PackedHeader); I++) {
+    *(reinterpret_cast<scudo::u8 *>(Block) + I) ^= 0x42U;
+    EXPECT_DEATH(scudo::Chunk::loadHeader(Cookie, P, &Header), "");
+    *(reinterpret_cast<scudo::u8 *>(Block) + I) ^= 0x42U;
+  }
+  free(Block);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/combined_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/combined_test.cc
new file mode 100644
index 0000000..46ce2eb
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/combined_test.cc
@@ -0,0 +1,237 @@
+//===-- combined_test.cc ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "allocator_config.h"
+#include "combined.h"
+
+#include "gtest/gtest.h"
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+static std::mutex Mutex;
+static std::condition_variable Cv;
+static bool Ready = false;
+
+static constexpr scudo::Chunk::Origin Origin = scudo::Chunk::Origin::Malloc;
+
+// This allows us to turn on the Quarantine for specific tests. The Quarantine
+// parameters are on the low end, to avoid having to loop excessively in some
+// tests.
+static bool UseQuarantine = false;
+extern "C" const char *__scudo_default_options() {
+  if (!UseQuarantine)
+    return "";
+  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
+         "quarantine_max_chunk_size=1024";
+}
+
+template <class Config> static void testAllocator() {
+  using AllocatorT = scudo::Allocator<Config>;
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+
+  constexpr scudo::uptr MinAlignLog = FIRST_32_SECOND_64(3U, 4U);
+
+  // This allocates and deallocates a bunch of chunks, with a wide range of
+  // sizes and alignments, with a focus on sizes that could trigger weird
+  // behaviors (e.g., plus or minus a small delta around a power of two).
+  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
+    for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
+      const scudo::uptr Align = 1U << AlignLog;
+      for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
+        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
+          continue;
+        const scudo::uptr Size = (1U << SizeLog) + Delta;
+        void *P = Allocator->allocate(Size, Origin, Align);
+        EXPECT_NE(P, nullptr);
+        EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
+        EXPECT_LE(Size, Allocator->getUsableSize(P));
+        memset(P, 0xaa, Size);
+        Allocator->deallocate(P, Origin, Size);
+      }
+    }
+  }
+  Allocator->releaseToOS();
+
+  // Verify that a chunk will end up being reused at some point.
+  const scudo::uptr NeedleSize = 1024U;
+  void *NeedleP = Allocator->allocate(NeedleSize, Origin);
+  Allocator->deallocate(NeedleP, Origin);
+  bool Found = false;
+  for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
+    void *P = Allocator->allocate(NeedleSize, Origin);
+    if (P == NeedleP)
+      Found = true;
+    Allocator->deallocate(P, Origin);
+  }
+  EXPECT_TRUE(Found);
+
+  constexpr scudo::uptr MaxSize = Config::Primary::SizeClassMap::MaxSize;
+
+  // Reallocate a large chunk all the way down to a byte, verifying that we
+  // preserve the data in the process.
+  scudo::uptr Size = MaxSize * 2;
+  const scudo::uptr DataSize = 2048U;
+  void *P = Allocator->allocate(Size, Origin);
+  const char Marker = 0xab;
+  memset(P, Marker, scudo::Min(Size, DataSize));
+  while (Size > 1U) {
+    Size /= 2U;
+    void *NewP = Allocator->reallocate(P, Size);
+    EXPECT_NE(NewP, nullptr);
+    for (scudo::uptr J = 0; J < scudo::Min(Size, DataSize); J++)
+      EXPECT_EQ((reinterpret_cast<char *>(NewP))[J], Marker);
+    P = NewP;
+  }
+  Allocator->deallocate(P, Origin);
+
+  // Allocate a bunch of chunks, then iterate over all of them, ensuring they
+  // are the ones we allocated. This requires the allocator to not have any
+  // other allocated chunk at this point (e.g., this won't work with the
+  // Quarantine).
+  if (!UseQuarantine) {
+    std::vector<void *> V;
+    for (scudo::uptr I = 0; I < 64U; I++)
+      V.push_back(Allocator->allocate(rand() % (MaxSize / 2U), Origin));
+    Allocator->disable();
+    Allocator->iterateOverChunks(
+        0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+        [](uintptr_t Base, size_t Size, void *Arg) {
+          std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
+          void *P = reinterpret_cast<void *>(Base);
+          EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
+        },
+        reinterpret_cast<void *>(&V));
+    Allocator->enable();
+    while (!V.empty()) {
+      Allocator->deallocate(V.back(), Origin);
+      V.pop_back();
+    }
+  }
+
+  Allocator->releaseToOS();
+  Allocator->printStats();
+}
+
+TEST(ScudoCombinedTest, BasicCombined) {
+  testAllocator<scudo::DefaultConfig>();
+#if SCUDO_WORDSIZE == 64U
+  testAllocator<scudo::FuchsiaConfig>();
+#endif
+  // The following configs should work on all platforms.
+  UseQuarantine = true;
+  testAllocator<scudo::AndroidConfig>();
+  UseQuarantine = false;
+  testAllocator<scudo::AndroidSvelteConfig>();
+}
+
+template <typename AllocatorT> static void stressAllocator(AllocatorT *A) {
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  std::vector<std::pair<void *, scudo::uptr>> V;
+  for (scudo::uptr I = 0; I < 256U; I++) {
+    const scudo::uptr Size = std::rand() % 4096U;
+    void *P = A->allocate(Size, Origin);
+    // A region could have run out of memory, resulting in a null P.
+    if (P)
+      V.push_back(std::make_pair(P, Size));
+  }
+  while (!V.empty()) {
+    auto Pair = V.back();
+    A->deallocate(Pair.first, Origin, Pair.second);
+    V.pop_back();
+  }
+}
+
+template <class Config> static void testAllocatorThreaded() {
+  using AllocatorT = scudo::Allocator<Config>;
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+  std::thread Threads[32];
+  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
+    Threads[I] = std::thread(stressAllocator<AllocatorT>, Allocator.get());
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+  Allocator->releaseToOS();
+}
+
+TEST(ScudoCombinedTest, ThreadedCombined) {
+  testAllocatorThreaded<scudo::DefaultConfig>();
+#if SCUDO_WORDSIZE == 64U
+  testAllocatorThreaded<scudo::FuchsiaConfig>();
+#endif
+  UseQuarantine = true;
+  testAllocatorThreaded<scudo::AndroidConfig>();
+  UseQuarantine = false;
+  testAllocatorThreaded<scudo::AndroidSvelteConfig>();
+}
+
+struct DeathConfig {
+  // Tiny allocator; its Primary only serves chunks of 1024 bytes.
+  using DeathSizeClassMap = scudo::SizeClassMap<1U, 10U, 10U, 10U, 1U, 10U>;
+  typedef scudo::SizeClassAllocator32<DeathSizeClassMap, 18U> Primary;
+  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>;
+};
+
+TEST(ScudoCombinedTest, DeathCombined) {
+  using AllocatorT = scudo::Allocator<DeathConfig>;
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+
+  const scudo::uptr Size = 1000U;
+  void *P = Allocator->allocate(Size, Origin);
+  EXPECT_NE(P, nullptr);
+
+  // Invalid sized deallocation.
+  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size + 8U), "");
+
+  // Misaligned pointer.
+  void *MisalignedP =
+      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) | 1U);
+  EXPECT_DEATH(Allocator->deallocate(MisalignedP, Origin, Size), "");
+  EXPECT_DEATH(Allocator->reallocate(MisalignedP, Size * 2U), "");
+
+  // Header corruption.
+  scudo::u64 *H =
+      reinterpret_cast<scudo::u64 *>(scudo::Chunk::getAtomicHeader(P));
+  *H ^= 0x42U;
+  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
+  *H ^= 0x420042U;
+  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
+  *H ^= 0x420000U;
+
+  // Invalid chunk state.
+  Allocator->deallocate(P, Origin, Size);
+  EXPECT_DEATH(Allocator->deallocate(P, Origin, Size), "");
+  EXPECT_DEATH(Allocator->reallocate(P, Size * 2U), "");
+  EXPECT_DEATH(Allocator->getUsableSize(P), "");
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/flags_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/flags_test.cc
new file mode 100644
index 0000000..2808a46c
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/flags_test.cc
@@ -0,0 +1,119 @@
+//===-- flags_test.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags.h"
+#include "flags_parser.h"
+
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+static const char FlagName[] = "flag_name";
+static const char FlagDesc[] = "flag description";
+
+template <typename T>
+static void testFlag(scudo::FlagType Type, T StartValue, const char *Env,
+                     T FinalValue) {
+  scudo::FlagParser Parser;
+  T Flag = StartValue;
+  Parser.registerFlag(FlagName, FlagDesc, Type, &Flag);
+  Parser.parseString(Env);
+  EXPECT_EQ(FinalValue, Flag);
+  // Reporting unrecognized flags is needed to reset them.
+  scudo::reportUnrecognizedFlags();
+}
+
+TEST(ScudoFlagsTest, BooleanFlags) {
+  testFlag(scudo::FlagType::FT_bool, false, "flag_name=1", true);
+  testFlag(scudo::FlagType::FT_bool, false, "flag_name=yes", true);
+  testFlag(scudo::FlagType::FT_bool, false, "flag_name='yes'", true);
+  testFlag(scudo::FlagType::FT_bool, false, "flag_name=true", true);
+  testFlag(scudo::FlagType::FT_bool, true, "flag_name=0", false);
+  testFlag(scudo::FlagType::FT_bool, true, "flag_name=\"0\"", false);
+  testFlag(scudo::FlagType::FT_bool, true, "flag_name=no", false);
+  testFlag(scudo::FlagType::FT_bool, true, "flag_name=false", false);
+  testFlag(scudo::FlagType::FT_bool, true, "flag_name='false'", false);
+}
+
+TEST(ScudoFlagsDeathTest, BooleanFlags) {
+  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name", true),
+               "expected '='");
+  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=", true),
+               "invalid value for bool option: ''");
+  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=2", true),
+               "invalid value for bool option: '2'");
+  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=-1", true),
+               "invalid value for bool option: '-1'");
+  EXPECT_DEATH(testFlag(scudo::FlagType::FT_bool, false, "flag_name=on", true),
+               "invalid value for bool option: 'on'");
+}
+
+TEST(ScudoFlagsTest, IntFlags) {
+  testFlag(scudo::FlagType::FT_int, -11, nullptr, -11);
+  testFlag(scudo::FlagType::FT_int, -11, "flag_name=0", 0);
+  testFlag(scudo::FlagType::FT_int, -11, "flag_name='0'", 0);
+  testFlag(scudo::FlagType::FT_int, -11, "flag_name=42", 42);
+  testFlag(scudo::FlagType::FT_int, -11, "flag_name=-42", -42);
+  testFlag(scudo::FlagType::FT_int, -11, "flag_name=\"-42\"", -42);
+
+  // Unrecognized flags are ignored.
+  testFlag(scudo::FlagType::FT_int, -11, "--flag_name=42", -11);
+  testFlag(scudo::FlagType::FT_int, -11, "zzzzzzz=42", -11);
+}
+
+TEST(ScudoFlagsDeathTest, IntFlags) {
+  EXPECT_DEATH(testFlag(scudo::FlagType::FT_int, -11, "flag_name", 0),
+               "expected '='");
+  EXPECT_DEATH(testFlag(scudo::FlagType::FT_int, -11, "flag_name=42U", 0),
+               "invalid value for int option");
+}
+
+static void testTwoFlags(const char *Env, bool ExpectedFlag1,
+                         const int ExpectedFlag2, const char *Name1 = "flag1",
+                         const char *Name2 = "flag2") {
+  scudo::FlagParser Parser;
+  bool Flag1 = !ExpectedFlag1;
+  int Flag2;
+  Parser.registerFlag(Name1, FlagDesc, scudo::FlagType::FT_bool, &Flag1);
+  Parser.registerFlag(Name2, FlagDesc, scudo::FlagType::FT_int, &Flag2);
+  Parser.parseString(Env);
+  EXPECT_EQ(ExpectedFlag1, Flag1);
+  EXPECT_EQ(Flag2, ExpectedFlag2);
+  // Reporting unrecognized flags is needed to reset them.
+  scudo::reportUnrecognizedFlags();
+}
+
+TEST(ScudoFlagsTest, MultipleFlags) {
+  testTwoFlags("flag1=1 flag2=42", true, 42);
+  testTwoFlags("flag2=-1 flag1=0", false, -1);
+  testTwoFlags("flag1=false:flag2=1337", false, 1337);
+  testTwoFlags("flag2=42:flag1=yes", true, 42);
+  testTwoFlags("flag2=42\nflag1=yes", true, 42);
+  testTwoFlags("flag2=42\r\nflag1=yes", true, 42);
+  testTwoFlags("flag2=42\tflag1=yes", true, 42);
+}
+
+TEST(ScudoFlagsTest, CommonSuffixFlags) {
+  testTwoFlags("flag=1 other_flag=42", true, 42, "flag", "other_flag");
+  testTwoFlags("other_flag=42 flag=1", true, 42, "flag", "other_flag");
+}
+
+TEST(ScudoFlagsTest, AllocatorFlags) {
+  scudo::FlagParser Parser;
+  scudo::Flags Flags;
+  scudo::registerFlags(&Parser, &Flags);
+  Flags.setDefaults();
+  Flags.dealloc_type_mismatch = false;
+  Flags.delete_size_mismatch = false;
+  Flags.quarantine_max_chunk_size = 1024;
+  Parser.parseString("dealloc_type_mismatch=true:delete_size_mismatch=true:"
+                     "quarantine_max_chunk_size=2048");
+  EXPECT_TRUE(Flags.dealloc_type_mismatch);
+  EXPECT_TRUE(Flags.delete_size_mismatch);
+  EXPECT_EQ(2048, Flags.quarantine_max_chunk_size);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/list_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/list_test.cc
new file mode 100644
index 0000000..e4053d8
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/list_test.cc
@@ -0,0 +1,185 @@
+//===-- list_test.cc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "scudo/standalone/list.h"
+#include "gtest/gtest.h"
+
+struct ListItem {
+  ListItem *Next;
+};
+
+typedef scudo::IntrusiveList<ListItem> List;
+
+static List StaticList;
+
+static void setList(List *L, ListItem *X = nullptr, ListItem *Y = nullptr,
+                    ListItem *Z = nullptr) {
+  L->clear();
+  if (X)
+    L->push_back(X);
+  if (Y)
+    L->push_back(Y);
+  if (Z)
+    L->push_back(Z);
+}
+
+static void checkList(List *L, ListItem *I1, ListItem *I2 = nullptr,
+                      ListItem *I3 = nullptr, ListItem *I4 = nullptr,
+                      ListItem *I5 = nullptr, ListItem *I6 = nullptr) {
+  if (I1) {
+    EXPECT_EQ(L->front(), I1);
+    L->pop_front();
+  }
+  if (I2) {
+    EXPECT_EQ(L->front(), I2);
+    L->pop_front();
+  }
+  if (I3) {
+    EXPECT_EQ(L->front(), I3);
+    L->pop_front();
+  }
+  if (I4) {
+    EXPECT_EQ(L->front(), I4);
+    L->pop_front();
+  }
+  if (I5) {
+    EXPECT_EQ(L->front(), I5);
+    L->pop_front();
+  }
+  if (I6) {
+    EXPECT_EQ(L->front(), I6);
+    L->pop_front();
+  }
+  EXPECT_TRUE(L->empty());
+}
+
+TEST(ScudoListTest, IntrusiveList) {
+  ListItem Items[6];
+  EXPECT_EQ(StaticList.size(), 0U);
+
+  List L;
+  L.clear();
+
+  ListItem *X = &Items[0];
+  ListItem *Y = &Items[1];
+  ListItem *Z = &Items[2];
+  ListItem *A = &Items[3];
+  ListItem *B = &Items[4];
+  ListItem *C = &Items[5];
+
+  EXPECT_EQ(L.size(), 0U);
+  L.push_back(X);
+  EXPECT_EQ(L.size(), 1U);
+  EXPECT_EQ(L.back(), X);
+  EXPECT_EQ(L.front(), X);
+  L.pop_front();
+  EXPECT_TRUE(L.empty());
+  L.checkConsistency();
+
+  L.push_front(X);
+  EXPECT_EQ(L.size(), 1U);
+  EXPECT_EQ(L.back(), X);
+  EXPECT_EQ(L.front(), X);
+  L.pop_front();
+  EXPECT_TRUE(L.empty());
+  L.checkConsistency();
+
+  L.push_front(X);
+  L.push_front(Y);
+  L.push_front(Z);
+  EXPECT_EQ(L.size(), 3U);
+  EXPECT_EQ(L.front(), Z);
+  EXPECT_EQ(L.back(), X);
+  L.checkConsistency();
+
+  L.pop_front();
+  EXPECT_EQ(L.size(), 2U);
+  EXPECT_EQ(L.front(), Y);
+  EXPECT_EQ(L.back(), X);
+  L.pop_front();
+  L.pop_front();
+  EXPECT_TRUE(L.empty());
+  L.checkConsistency();
+
+  L.push_back(X);
+  L.push_back(Y);
+  L.push_back(Z);
+  EXPECT_EQ(L.size(), 3U);
+  EXPECT_EQ(L.front(), X);
+  EXPECT_EQ(L.back(), Z);
+  L.checkConsistency();
+
+  L.pop_front();
+  EXPECT_EQ(L.size(), 2U);
+  EXPECT_EQ(L.front(), Y);
+  EXPECT_EQ(L.back(), Z);
+  L.pop_front();
+  L.pop_front();
+  EXPECT_TRUE(L.empty());
+  L.checkConsistency();
+
+  L.push_back(X);
+  L.push_back(Y);
+  L.push_back(Z);
+  L.extract(X, Y);
+  EXPECT_EQ(L.size(), 2U);
+  EXPECT_EQ(L.front(), X);
+  EXPECT_EQ(L.back(), Z);
+  L.checkConsistency();
+  L.extract(X, Z);
+  EXPECT_EQ(L.size(), 1U);
+  EXPECT_EQ(L.front(), X);
+  EXPECT_EQ(L.back(), X);
+  L.checkConsistency();
+  L.pop_front();
+  EXPECT_TRUE(L.empty());
+
+  List L1, L2;
+  L1.clear();
+  L2.clear();
+
+  L1.append_front(&L2);
+  EXPECT_TRUE(L1.empty());
+  EXPECT_TRUE(L2.empty());
+
+  L1.append_back(&L2);
+  EXPECT_TRUE(L1.empty());
+  EXPECT_TRUE(L2.empty());
+
+  setList(&L1, X);
+  checkList(&L1, X);
+
+  setList(&L1, X, Y, Z);
+  setList(&L2, A, B, C);
+  L1.append_back(&L2);
+  checkList(&L1, X, Y, Z, A, B, C);
+  EXPECT_TRUE(L2.empty());
+
+  setList(&L1, X, Y);
+  setList(&L2);
+  L1.append_front(&L2);
+  checkList(&L1, X, Y);
+  EXPECT_TRUE(L2.empty());
+}
+
+TEST(ScudoListTest, IntrusiveListAppendEmpty) {
+  ListItem I;
+  List L;
+  L.clear();
+  L.push_back(&I);
+  List L2;
+  L2.clear();
+  L.append_back(&L2);
+  EXPECT_EQ(L.back(), &I);
+  EXPECT_EQ(L.front(), &I);
+  EXPECT_EQ(L.size(), 1U);
+  L.append_front(&L2);
+  EXPECT_EQ(L.back(), &I);
+  EXPECT_EQ(L.front(), &I);
+  EXPECT_EQ(L.size(), 1U);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/map_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/map_test.cc
new file mode 100644
index 0000000..a645e23
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/map_test.cc
@@ -0,0 +1,71 @@
+//===-- map_test.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+
+#include "gtest/gtest.h"
+
+#include <string.h>
+#include <unistd.h>
+
+static const char *MappingName = "scudo:test";
+
+TEST(ScudoMapTest, PageSize) {
+  EXPECT_EQ(scudo::getPageSizeCached(),
+            static_cast<scudo::uptr>(getpagesize()));
+}
+
+TEST(ScudoMapTest, MapNoAccessUnmap) {
+  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
+  scudo::MapPlatformData Data = {};
+  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &Data);
+  EXPECT_NE(P, nullptr);
+  EXPECT_DEATH(memset(P, 0xaa, Size), "");
+  scudo::unmap(P, Size, UNMAP_ALL, &Data);
+}
+
+TEST(ScudoMapTest, MapUnmap) {
+  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
+  scudo::MapPlatformData Data = {};
+  void *P = scudo::map(nullptr, Size, MappingName, 0, &Data);
+  EXPECT_NE(P, nullptr);
+  memset(P, 0xaa, Size);
+  scudo::unmap(P, Size, 0, &Data);
+  EXPECT_DEATH(memset(P, 0xbb, Size), "");
+}
+
+TEST(ScudoMapTest, MapWithGuardUnmap) {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  const scudo::uptr Size = 4 * PageSize;
+  scudo::MapPlatformData Data = {};
+  void *P = scudo::map(nullptr, Size + 2 * PageSize, MappingName, MAP_NOACCESS,
+                       &Data);
+  EXPECT_NE(P, nullptr);
+  void *Q =
+      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) + PageSize);
+  EXPECT_EQ(scudo::map(Q, Size, MappingName, 0, &Data), Q);
+  memset(Q, 0xaa, Size);
+  EXPECT_DEATH(memset(Q, 0xaa, Size + 1), "");
+  scudo::unmap(P, Size + 2 * PageSize, UNMAP_ALL, &Data);
+}
+
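+// Reserve a no-access region, then commit pages inside it one at a time,
+// mimicking how an allocator region can be grown on demand.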
+TEST(ScudoMapTest, MapGrowUnmap) {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  const scudo::uptr Size = 4 * PageSize;
+  scudo::MapPlatformData Data = {};
+  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &Data);
+  EXPECT_NE(P, nullptr);
+  void *Q =
+      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) + PageSize);
+  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &Data), Q);
+  memset(Q, 0xaa, PageSize);
+  Q = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Q) + PageSize);
+  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &Data), Q);
+  memset(Q, 0xbb, PageSize);
+  scudo::unmap(P, Size, UNMAP_ALL, &Data);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/mutex_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/mutex_test.cc
new file mode 100644
index 0000000..930838c
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/mutex_test.cc
@@ -0,0 +1,102 @@
+//===-- mutex_test.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mutex.h"
+
+#include "gtest/gtest.h"
+
+#include <pthread.h>
+#include <string.h>
+
+class TestData {
+public:
+  explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
+    for (scudo::u32 I = 0; I < Size; I++)
+      Data[I] = 0;
+  }
+
+  void write() {
+    scudo::ScopedLock L(Mutex);
+    T V0 = Data[0];
+    for (scudo::u32 I = 0; I < Size; I++) {
+      EXPECT_EQ(Data[I], V0);
+      Data[I]++;
+    }
+  }
+
+  void tryWrite() {
+    if (!Mutex.tryLock())
+      return;
+    T V0 = Data[0];
+    for (scudo::u32 I = 0; I < Size; I++) {
+      EXPECT_EQ(Data[I], V0);
+      Data[I]++;
+    }
+    Mutex.unlock();
+  }
+
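+  // Simulates work done outside of the critical section; only touches local
+  // data, so it does not require holding the mutex.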
+  void backoff() {
+    volatile T LocalData[Size] = {};
+    for (scudo::u32 I = 0; I < Size; I++) {
+      LocalData[I]++;
+      EXPECT_EQ(LocalData[I], 1U);
+    }
+  }
+
+private:
+  static const scudo::u32 Size = 64U;
+  typedef scudo::u64 T;
+  scudo::HybridMutex &Mutex;
+  ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size];
+};
+
+const scudo::u32 NumberOfThreads = 8;
+#if SCUDO_DEBUG
+const scudo::u32 NumberOfIterations = 4 * 1024;
+#else
+const scudo::u32 NumberOfIterations = 16 * 1024;
+#endif
+
+static void *lockThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
+  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
+    Data->write();
+    Data->backoff();
+  }
+  return 0;
+}
+
+static void *tryThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
+  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
+    Data->tryWrite();
+    Data->backoff();
+  }
+  return 0;
+}
+
+TEST(ScudoMutexTest, Mutex) {
+  scudo::HybridMutex M;
+  M.init();
+  TestData Data(M);
+  pthread_t Threads[NumberOfThreads];
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_create(&Threads[I], 0, lockThread, &Data);
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_join(Threads[I], 0);
+}
+
+TEST(ScudoMutexTest, MutexTry) {
+  scudo::HybridMutex M;
+  M.init();
+  TestData Data(M);
+  pthread_t Threads[NumberOfThreads];
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_create(&Threads[I], 0, tryThread, &Data);
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_join(Threads[I], 0);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/primary_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/primary_test.cc
new file mode 100644
index 0000000..b4abbc2
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/primary_test.cc
@@ -0,0 +1,190 @@
+//===-- primary_test.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "primary32.h"
+#include "primary64.h"
+#include "size_class_map.h"
+
+#include "gtest/gtest.h"
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+// Note that with small enough regions, the SizeClassAllocator64 also works on
+// 32-bit architectures. It's not something we want to encourage, but we should
+// still ensure the tests pass.
+
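+// For each allocatable power-of-two size, grab a batch of blocks through a
+// local cache, scribble over them, give them back, then release the freed
+// memory to the OS.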
+template <typename Primary> static void testPrimary() {
+  const scudo::uptr NumberOfAllocations = 32U;
+  auto Deleter = [](Primary *P) {
+    P->unmapTestOnly();
+    delete P;
+  };
+  std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
+  Allocator->init(/*ReleaseToOsInterval=*/-1);
+  typename Primary::CacheT Cache;
+  Cache.init(nullptr, Allocator.get());
+  for (scudo::uptr I = 0; I <= 16U; I++) {
+    const scudo::uptr Size = 1UL << I;
+    if (!Primary::canAllocate(Size))
+      continue;
+    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
+    void *Pointers[NumberOfAllocations];
+    for (scudo::uptr J = 0; J < NumberOfAllocations; J++) {
+      void *P = Cache.allocate(ClassId);
+      memset(P, 'B', Size);
+      Pointers[J] = P;
+    }
+    for (scudo::uptr J = 0; J < NumberOfAllocations; J++)
+      Cache.deallocate(ClassId, Pointers[J]);
+  }
+  Cache.destroy(nullptr);
+  Allocator->releaseToOS();
+  Allocator->printStats();
+}
+
+TEST(ScudoPrimaryTest, BasicPrimary) {
+  using SizeClassMap = scudo::DefaultSizeClassMap;
+  testPrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
+  testPrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
+}
+
+// The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
+// OOM'ing the 32-bit one requires actually exhausting memory, so we skip it.
+TEST(ScudoPrimaryTest, Primary64OOM) {
+  using Primary = scudo::SizeClassAllocator64<scudo::DefaultSizeClassMap, 20U>;
+  using TransferBatch = Primary::CacheT::TransferBatch;
+  Primary Allocator;
+  Allocator.init(/*ReleaseToOsInterval=*/-1);
+  typename Primary::CacheT Cache;
+  scudo::GlobalStats Stats;
+  Stats.init();
+  Cache.init(&Stats, &Allocator);
+  bool AllocationFailed = false;
+  std::vector<TransferBatch *> Batches;
+  const scudo::uptr ClassId = Primary::SizeClassMap::LargestClassId;
+  const scudo::uptr Size = Primary::getSizeByClassId(ClassId);
+  for (scudo::uptr I = 0; I < 10000U; I++) {
+    TransferBatch *B = Allocator.popBatch(&Cache, ClassId);
+    if (!B) {
+      AllocationFailed = true;
+      break;
+    }
+    for (scudo::uptr J = 0; J < B->getCount(); J++)
+      memset(B->get(J), 'B', Size);
+    Batches.push_back(B);
+  }
+  while (!Batches.empty()) {
+    Allocator.pushBatch(ClassId, Batches.back());
+    Batches.pop_back();
+  }
+  Cache.destroy(nullptr);
+  Allocator.releaseToOS();
+  Allocator.printStats();
+  EXPECT_EQ(AllocationFailed, true);
+  Allocator.unmapTestOnly();
+}
+
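+// Allocate a set of random-sized blocks, then check that iterateOverBlocks
+// (with the allocator disabled) visits every one of them.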
+template <typename Primary> static void testIteratePrimary() {
+  auto Deleter = [](Primary *P) {
+    P->unmapTestOnly();
+    delete P;
+  };
+  std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
+  Allocator->init(/*ReleaseToOsInterval=*/-1);
+  typename Primary::CacheT Cache;
+  Cache.init(nullptr, Allocator.get());
+  std::vector<std::pair<scudo::uptr, void *>> V;
+  for (scudo::uptr I = 0; I < 64U; I++) {
+    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
+    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
+    void *P = Cache.allocate(ClassId);
+    V.push_back(std::make_pair(ClassId, P));
+  }
+  scudo::uptr Found = 0;
+  auto Lambda = [V, &Found](scudo::uptr Block) {
+    for (const auto &Pair : V) {
+      if (Pair.second == reinterpret_cast<void *>(Block))
+        Found++;
+    }
+  };
+  Allocator->disable();
+  Allocator->iterateOverBlocks(Lambda);
+  Allocator->enable();
+  EXPECT_EQ(Found, V.size());
+  while (!V.empty()) {
+    auto Pair = V.back();
+    Cache.deallocate(Pair.first, Pair.second);
+    V.pop_back();
+  }
+  Cache.destroy(nullptr);
+  Allocator->releaseToOS();
+  Allocator->printStats();
+}
+
+TEST(ScudoPrimaryTest, PrimaryIterate) {
+  using SizeClassMap = scudo::DefaultSizeClassMap;
+  testIteratePrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
+  testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
+}
+
+static std::mutex Mutex;
+static std::condition_variable Cv;
+static bool Ready = false;
+
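+// Worker routine for the threaded test: each thread uses its own thread-local
+// cache and starts allocating once the main thread signals Ready.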
+template <typename Primary> static void performAllocations(Primary *Allocator) {
+  static THREADLOCAL typename Primary::CacheT Cache;
+  Cache.init(nullptr, Allocator);
+  std::vector<std::pair<scudo::uptr, void *>> V;
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  for (scudo::uptr I = 0; I < 256U; I++) {
+    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize / 4;
+    const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
+    void *P = Cache.allocate(ClassId);
+    if (P)
+      V.push_back(std::make_pair(ClassId, P));
+  }
+  while (!V.empty()) {
+    auto Pair = V.back();
+    Cache.deallocate(Pair.first, Pair.second);
+    V.pop_back();
+  }
+  Cache.destroy(nullptr);
+}
+
+template <typename Primary> static void testPrimaryThreaded() {
+  auto Deleter = [](Primary *P) {
+    P->unmapTestOnly();
+    delete P;
+  };
+  std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
+  Allocator->init(/*ReleaseToOsInterval=*/-1);
+  std::thread Threads[32];
+  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
+    Threads[I] = std::thread(performAllocations<Primary>, Allocator.get());
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+  Allocator->releaseToOS();
+  Allocator->printStats();
+}
+
+TEST(ScudoPrimaryTest, PrimaryThreaded) {
+  using SizeClassMap = scudo::SvelteSizeClassMap;
+  testPrimaryThreaded<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
+  testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/quarantine_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/quarantine_test.cc
new file mode 100644
index 0000000..476a507
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/quarantine_test.cc
@@ -0,0 +1,240 @@
+//===-- quarantine_test.cc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "quarantine.h"
+
+#include "gtest/gtest.h"
+
+#include <pthread.h>
+#include <stdlib.h>
+
+static void *FakePtr = reinterpret_cast<void *>(0xFA83FA83);
+static const scudo::uptr BlockSize = 8UL;
+static const scudo::uptr LargeBlockSize = 16384UL;
+
+struct QuarantineCallback {
+  void recycle(void *P) { EXPECT_EQ(P, FakePtr); }
+  void *allocate(scudo::uptr Size) { return malloc(Size); }
+  void deallocate(void *P) { free(P); }
+};
+
+typedef scudo::GlobalQuarantine<QuarantineCallback, void> QuarantineT;
+typedef typename QuarantineT::CacheT CacheT;
+
+static QuarantineCallback Cb;
+
+static void deallocateCache(CacheT *Cache) {
+  while (scudo::QuarantineBatch *Batch = Cache->dequeueBatch())
+    Cb.deallocate(Batch);
+}
+
+TEST(ScudoQuarantineTest, QuarantineBatchMerge) {
+  // Verify the trivial case.
+  scudo::QuarantineBatch Into;
+  Into.init(FakePtr, 4UL);
+  scudo::QuarantineBatch From;
+  From.init(FakePtr, 8UL);
+
+  Into.merge(&From);
+
+  EXPECT_EQ(Into.Count, 2UL);
+  EXPECT_EQ(Into.Batch[0], FakePtr);
+  EXPECT_EQ(Into.Batch[1], FakePtr);
+  EXPECT_EQ(Into.Size, 12UL + sizeof(scudo::QuarantineBatch));
+  EXPECT_EQ(Into.getQuarantinedSize(), 12UL);
+
+  EXPECT_EQ(From.Count, 0UL);
+  EXPECT_EQ(From.Size, sizeof(scudo::QuarantineBatch));
+  EXPECT_EQ(From.getQuarantinedSize(), 0UL);
+
+  // Merge the batch to the limit.
+  for (scudo::uptr I = 2; I < scudo::QuarantineBatch::MaxCount; ++I)
+    From.push_back(FakePtr, 8UL);
+  EXPECT_TRUE(Into.Count + From.Count == scudo::QuarantineBatch::MaxCount);
+  EXPECT_TRUE(Into.canMerge(&From));
+
+  Into.merge(&From);
+  EXPECT_TRUE(Into.Count == scudo::QuarantineBatch::MaxCount);
+
+  // No more space, not even for one element.
+  From.init(FakePtr, 8UL);
+
+  EXPECT_FALSE(Into.canMerge(&From));
+}
+
+TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesEmpty) {
+  CacheT Cache;
+  CacheT ToDeallocate;
+  Cache.init();
+  ToDeallocate.init();
+  Cache.mergeBatches(&ToDeallocate);
+
+  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
+  EXPECT_EQ(ToDeallocate.dequeueBatch(), nullptr);
+}
+
+TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesOneBatch) {
+  CacheT Cache;
+  Cache.init();
+  Cache.enqueue(Cb, FakePtr, BlockSize);
+  EXPECT_EQ(BlockSize + sizeof(scudo::QuarantineBatch), Cache.getSize());
+
+  CacheT ToDeallocate;
+  ToDeallocate.init();
+  Cache.mergeBatches(&ToDeallocate);
+
+  // Nothing to merge, nothing to deallocate.
+  EXPECT_EQ(BlockSize + sizeof(scudo::QuarantineBatch), Cache.getSize());
+
+  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
+  EXPECT_EQ(ToDeallocate.dequeueBatch(), nullptr);
+
+  deallocateCache(&Cache);
+}
+
+TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesSmallBatches) {
+  // Make a Cache with two batches small enough to merge.
+  CacheT From;
+  From.init();
+  From.enqueue(Cb, FakePtr, BlockSize);
+  CacheT Cache;
+  Cache.init();
+  Cache.enqueue(Cb, FakePtr, BlockSize);
+
+  Cache.transfer(&From);
+  EXPECT_EQ(BlockSize * 2 + sizeof(scudo::QuarantineBatch) * 2,
+            Cache.getSize());
+
+  CacheT ToDeallocate;
+  ToDeallocate.init();
+  Cache.mergeBatches(&ToDeallocate);
+
+  // Batches merged, one batch to deallocate.
+  EXPECT_EQ(BlockSize * 2 + sizeof(scudo::QuarantineBatch), Cache.getSize());
+  EXPECT_EQ(ToDeallocate.getSize(), sizeof(scudo::QuarantineBatch));
+
+  deallocateCache(&Cache);
+  deallocateCache(&ToDeallocate);
+}
+
+TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesTooBigToMerge) {
+  const scudo::uptr NumBlocks = scudo::QuarantineBatch::MaxCount - 1;
+
+  // Make a Cache with two batches that are too big to be merged together.
+  CacheT From;
+  CacheT Cache;
+  From.init();
+  Cache.init();
+  for (scudo::uptr I = 0; I < NumBlocks; ++I) {
+    From.enqueue(Cb, FakePtr, BlockSize);
+    Cache.enqueue(Cb, FakePtr, BlockSize);
+  }
+  Cache.transfer(&From);
+  EXPECT_EQ(BlockSize * NumBlocks * 2 + sizeof(scudo::QuarantineBatch) * 2,
+            Cache.getSize());
+
+  CacheT ToDeallocate;
+  ToDeallocate.init();
+  Cache.mergeBatches(&ToDeallocate);
+
+  // Batches cannot be merged.
+  EXPECT_EQ(BlockSize * NumBlocks * 2 + sizeof(scudo::QuarantineBatch) * 2,
+            Cache.getSize());
+  EXPECT_EQ(ToDeallocate.getSize(), 0UL);
+
+  deallocateCache(&Cache);
+}
+
+TEST(ScudoQuarantineTest, QuarantineCacheMergeBatchesALotOfBatches) {
+  const scudo::uptr NumBatchesAfterMerge = 3;
+  const scudo::uptr NumBlocks =
+      scudo::QuarantineBatch::MaxCount * NumBatchesAfterMerge;
+  const scudo::uptr NumBatchesBeforeMerge = NumBlocks;
+
+  // Make a Cache with many small batches.
+  CacheT Cache;
+  Cache.init();
+  for (scudo::uptr I = 0; I < NumBlocks; ++I) {
+    CacheT From;
+    From.init();
+    From.enqueue(Cb, FakePtr, BlockSize);
+    Cache.transfer(&From);
+  }
+
+  EXPECT_EQ(BlockSize * NumBlocks +
+                sizeof(scudo::QuarantineBatch) * NumBatchesBeforeMerge,
+            Cache.getSize());
+
+  CacheT ToDeallocate;
+  ToDeallocate.init();
+  Cache.mergeBatches(&ToDeallocate);
+
+  // All blocks should fit into 3 batches.
+  EXPECT_EQ(BlockSize * NumBlocks +
+                sizeof(scudo::QuarantineBatch) * NumBatchesAfterMerge,
+            Cache.getSize());
+
+  EXPECT_EQ(ToDeallocate.getSize(),
+            sizeof(scudo::QuarantineBatch) *
+                (NumBatchesBeforeMerge - NumBatchesAfterMerge));
+
+  deallocateCache(&Cache);
+  deallocateCache(&ToDeallocate);
+}
+
+static const scudo::uptr MaxQuarantineSize = 1024UL << 10; // 1MB
+static const scudo::uptr MaxCacheSize = 256UL << 10;       // 256KB
+
+TEST(ScudoQuarantineTest, GlobalQuarantine) {
+  QuarantineT Quarantine;
+  CacheT Cache;
+  Cache.init();
+  Quarantine.init(MaxQuarantineSize, MaxCacheSize);
+  EXPECT_EQ(Quarantine.getMaxSize(), MaxQuarantineSize);
+  EXPECT_EQ(Quarantine.getCacheSize(), MaxCacheSize);
+
+  bool DrainOccurred = false;
+  scudo::uptr CacheSize = Cache.getSize();
+  EXPECT_EQ(Cache.getSize(), 0UL);
+  // We quarantine enough blocks that a drain has to occur. Verify this by
+  // looking for a decrease in the cache size.
+  for (scudo::uptr I = 0; I < 128UL; I++) {
+    Quarantine.put(&Cache, Cb, FakePtr, LargeBlockSize);
+    if (!DrainOccurred && Cache.getSize() < CacheSize)
+      DrainOccurred = true;
+    CacheSize = Cache.getSize();
+  }
+  EXPECT_TRUE(DrainOccurred);
+
+  Quarantine.drainAndRecycle(&Cache, Cb);
+  EXPECT_EQ(Cache.getSize(), 0UL);
+
+  Quarantine.printStats();
+}
+
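+// Worker routine for the threaded test: each thread funnels blocks into the
+// shared global quarantine through its own local cache.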
+void *populateQuarantine(void *Param) {
+  CacheT Cache;
+  Cache.init();
+  QuarantineT *Quarantine = reinterpret_cast<QuarantineT *>(Param);
+  for (scudo::uptr I = 0; I < 128UL; I++)
+    Quarantine->put(&Cache, Cb, FakePtr, LargeBlockSize);
+  return 0;
+}
+
+TEST(ScudoQuarantineTest, ThreadedGlobalQuarantine) {
+  QuarantineT Quarantine;
+  Quarantine.init(MaxQuarantineSize, MaxCacheSize);
+
+  const scudo::uptr NumberOfThreads = 32U;
+  pthread_t T[NumberOfThreads];
+  for (scudo::uptr I = 0; I < NumberOfThreads; I++)
+    pthread_create(&T[I], 0, populateQuarantine, &Quarantine);
+  for (scudo::uptr I = 0; I < NumberOfThreads; I++)
+    pthread_join(T[I], 0);
+
+  Quarantine.printStats();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/release_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/release_test.cc
new file mode 100644
index 0000000..2279d5d
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/release_test.cc
@@ -0,0 +1,260 @@
+//===-- release_test.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "list.h"
+#include "release.h"
+#include "size_class_map.h"
+
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <random>
+
+TEST(ScudoReleaseTest, PackedCounterArray) {
+  for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
+    // Various valid counter max values packed into one word.
+    scudo::PackedCounterArray Counters2N(1, 1UL << I);
+    EXPECT_EQ(sizeof(scudo::uptr), Counters2N.getBufferSize());
+    // Check the "all bit set" values too.
+    scudo::PackedCounterArray Counters2N1_1(1, ~0UL >> I);
+    EXPECT_EQ(sizeof(scudo::uptr), Counters2N1_1.getBufferSize());
+    // Verify the packing ratio: the counter is expected to be packed into the
+    // closest power-of-2 number of bits.
+    scudo::PackedCounterArray Counters(SCUDO_WORDSIZE, 1UL << I);
+    EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpToPowerOfTwo(I + 1),
+              Counters.getBufferSize());
+  }
+
+  // Go through 1, 2, 4, 8, .. {32,64} bits per counter.
+  for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
+    // Make sure counters request one memory page for the buffer.
+    const scudo::uptr NumCounters =
+        (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
+    scudo::PackedCounterArray Counters(NumCounters, 1UL << ((1UL << I) - 1));
+    Counters.inc(0);
+    for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
+      EXPECT_EQ(0UL, Counters.get(C));
+      Counters.inc(C);
+      EXPECT_EQ(1UL, Counters.get(C - 1));
+    }
+    EXPECT_EQ(0UL, Counters.get(NumCounters - 1));
+    Counters.inc(NumCounters - 1);
+    if (I > 0) {
+      Counters.incRange(0, NumCounters - 1);
+      for (scudo::uptr C = 0; C < NumCounters; C++)
+        EXPECT_EQ(2UL, Counters.get(C));
+    }
+  }
+}
+
+class StringRangeRecorder {
+public:
+  std::string ReportedPages;
+
+  StringRangeRecorder()
+      : PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}
+
+  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
+    From >>= PageSizeScaledLog;
+    To >>= PageSizeScaledLog;
+    EXPECT_LT(From, To);
+    if (!ReportedPages.empty())
+      EXPECT_LT(LastPageReported, From);
+    ReportedPages.append(From - LastPageReported, '.');
+    ReportedPages.append(To - From, 'x');
+    LastPageReported = To;
+  }
+
+private:
+  const scudo::uptr PageSizeScaledLog;
+  scudo::uptr LastPageReported = 0;
+};
+
+TEST(ScudoReleaseTest, FreePagesRangeTracker) {
+  // 'x' denotes a page to be released, '.' denotes a page to be kept around.
+  const char *TestCases[] = {
+      "",
+      ".",
+      "x",
+      "........",
+      "xxxxxxxxxxx",
+      "..............xxxxx",
+      "xxxxxxxxxxxxxxxxxx.....",
+      "......xxxxxxxx........",
+      "xxx..........xxxxxxxxxxxxxxx",
+      "......xxxx....xxxx........",
+      "xxx..........xxxxxxxx....xxxxxxx",
+      "x.x.x.x.x.x.x.x.x.x.x.x.",
+      ".x.x.x.x.x.x.x.x.x.x.x.x",
+      ".x.x.x.x.x.x.x.x.x.x.x.x.",
+      "x.x.x.x.x.x.x.x.x.x.x.x.x",
+  };
+  typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;
+
+  for (auto TestCase : TestCases) {
+    StringRangeRecorder Recorder;
+    RangeTracker Tracker(&Recorder);
+    for (scudo::uptr I = 0; TestCase[I] != 0; I++)
+      Tracker.processNextPage(TestCase[I] == 'x');
+    Tracker.finish();
+    // Strip trailing '.'-pages before comparing the results, as they are not
+    // going to be reported to the recorder anyway.
+    const char *LastX = strrchr(TestCase, 'x');
+    std::string Expected(TestCase,
+                         LastX == nullptr ? 0 : (LastX - TestCase + 1));
+    EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
+  }
+}
+
+class ReleasedPagesRecorder {
+public:
+  std::set<scudo::uptr> ReportedPages;
+
+  void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
+    const scudo::uptr PageSize = scudo::getPageSizeCached();
+    for (scudo::uptr I = From; I < To; I += PageSize)
+      ReportedPages.insert(I);
+  }
+};
+
+// Simplified version of a TransferBatch.
+template <class SizeClassMap> struct FreeBatch {
+  static const scudo::u32 MaxCount = SizeClassMap::MaxNumCachedHint;
+  void clear() { Count = 0; }
+  void add(scudo::uptr P) {
+    DCHECK_LT(Count, MaxCount);
+    Batch[Count++] = P;
+  }
+  scudo::u32 getCount() const { return Count; }
+  scudo::uptr get(scudo::u32 I) const {
+    DCHECK_LE(I, Count);
+    return Batch[I];
+  }
+  FreeBatch *Next;
+
+private:
+  scudo::u32 Count;
+  scudo::uptr Batch[MaxCount];
+};
+
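+// For each size class, build a randomized free list, release it to a recorder,
+// then cross-check the recorded pages against the free and used ranges.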
+template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
+  typedef FreeBatch<SizeClassMap> Batch;
+  const scudo::uptr AllocatedPagesCount = 1024;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  std::mt19937 R;
+  scudo::u32 RandState = 42;
+
+  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
+    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+    const scudo::uptr MaxBlocks = AllocatedPagesCount * PageSize / BlockSize;
+
+    // Generate the random free list.
+    std::vector<scudo::uptr> FreeArray;
+    bool InFreeRange = false;
+    scudo::uptr CurrentRangeEnd = 0;
+    for (scudo::uptr I = 0; I < MaxBlocks; I++) {
+      if (I == CurrentRangeEnd) {
+        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
+        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
+      }
+      if (InFreeRange)
+        FreeArray.push_back(I * BlockSize);
+    }
+    if (FreeArray.empty())
+      continue;
+    // Shuffle the array to ensure that the order is irrelevant.
+    std::shuffle(FreeArray.begin(), FreeArray.end(), R);
+
+    // Build the FreeList from the FreeArray.
+    scudo::IntrusiveList<Batch> FreeList;
+    FreeList.clear();
+    Batch *CurrentBatch = nullptr;
+    for (auto const &Block : FreeArray) {
+      if (!CurrentBatch) {
+        CurrentBatch = new Batch;
+        CurrentBatch->clear();
+        FreeList.push_back(CurrentBatch);
+      }
+      CurrentBatch->add(Block);
+      if (CurrentBatch->getCount() == Batch::MaxCount)
+        CurrentBatch = nullptr;
+    }
+
+    // Release the memory.
+    ReleasedPagesRecorder Recorder;
+    releaseFreeMemoryToOS(&FreeList, 0, AllocatedPagesCount, BlockSize,
+                          &Recorder);
+
+    // Verify that no released page is touched by a used chunk, and that every
+    // range of free chunks large enough to span entire memory pages had those
+    // pages released.
+    scudo::uptr VerifiedReleasedPages = 0;
+    std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());
+
+    scudo::uptr CurrentBlock = 0;
+    InFreeRange = false;
+    scudo::uptr CurrentFreeRangeStart = 0;
+    for (scudo::uptr I = 0; I <= MaxBlocks; I++) {
+      const bool IsFreeBlock =
+          FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
+      if (IsFreeBlock) {
+        if (!InFreeRange) {
+          InFreeRange = true;
+          CurrentFreeRangeStart = CurrentBlock;
+        }
+      } else {
+        // Verify that this used chunk does not touch any released page.
+        const scudo::uptr StartPage = CurrentBlock / PageSize;
+        const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
+        for (scudo::uptr J = StartPage; J <= EndPage; J++) {
+          const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
+                                    Recorder.ReportedPages.end();
+          EXPECT_EQ(false, PageReleased);
+        }
+
+        if (InFreeRange) {
+          InFreeRange = false;
+          // Verify that every memory page fully covered by this range of free
+          // chunks was released.
+          scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize);
+          while (P + PageSize <= CurrentBlock) {
+            const bool PageReleased =
+                Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
+            EXPECT_EQ(true, PageReleased);
+            VerifiedReleasedPages++;
+            P += PageSize;
+          }
+        }
+      }
+
+      CurrentBlock += BlockSize;
+    }
+
+    EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);
+
+    while (!FreeList.empty()) {
+      CurrentBatch = FreeList.front();
+      FreeList.pop_front();
+      delete CurrentBatch;
+    }
+  }
+}
+
+TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
+  testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
+}
+
+TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
+  testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
+}
+
+TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSSvelte) {
+  testReleaseFreeMemoryToOS<scudo::SvelteSizeClassMap>();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/report_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/report_test.cc
new file mode 100644
index 0000000..ce7eda5
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/report_test.cc
@@ -0,0 +1,47 @@
+//===-- report_test.cc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "scudo/standalone/report.h"
+#include "gtest/gtest.h"
+
+TEST(ScudoReportTest, Generic) {
+  void *P = reinterpret_cast<void *>(0x42424242U);
+  EXPECT_DEATH(scudo::reportError("TEST123"), "Scudo ERROR.*TEST123");
+  EXPECT_DEATH(scudo::reportInvalidFlag("ABC", "DEF"), "Scudo ERROR.*ABC.*DEF");
+  EXPECT_DEATH(scudo::reportHeaderCorruption(P), "Scudo ERROR.*42424242");
+  EXPECT_DEATH(scudo::reportHeaderRace(P), "Scudo ERROR.*42424242");
+  EXPECT_DEATH(scudo::reportSanityCheckError("XYZ"), "Scudo ERROR.*XYZ");
+  EXPECT_DEATH(scudo::reportAlignmentTooBig(123, 456), "Scudo ERROR.*123.*456");
+  EXPECT_DEATH(scudo::reportAllocationSizeTooBig(123, 456, 789),
+               "Scudo ERROR.*123.*456.*789");
+  EXPECT_DEATH(scudo::reportOutOfMemory(4242), "Scudo ERROR.*4242");
+  EXPECT_DEATH(
+      scudo::reportInvalidChunkState(scudo::AllocatorAction::Recycling, P),
+      "Scudo ERROR.*recycling.*42424242");
+  EXPECT_DEATH(
+      scudo::reportInvalidChunkState(scudo::AllocatorAction::Sizing, P),
+      "Scudo ERROR.*sizing.*42424242");
+  EXPECT_DEATH(
+      scudo::reportMisalignedPointer(scudo::AllocatorAction::Deallocating, P),
+      "Scudo ERROR.*deallocating.*42424242");
+  EXPECT_DEATH(scudo::reportDeallocTypeMismatch(
+                   scudo::AllocatorAction::Reallocating, P, 0, 1),
+               "Scudo ERROR.*reallocating.*42424242");
+  EXPECT_DEATH(scudo::reportDeleteSizeMismatch(P, 123, 456),
+               "Scudo ERROR.*42424242.*123.*456");
+}
+
+TEST(ScudoReportTest, CSpecific) {
+  EXPECT_DEATH(scudo::reportAlignmentNotPowerOfTwo(123), "Scudo ERROR.*123");
+  EXPECT_DEATH(scudo::reportCallocOverflow(123, 456), "Scudo ERROR.*123.*456");
+  EXPECT_DEATH(scudo::reportInvalidPosixMemalignAlignment(789),
+               "Scudo ERROR.*789");
+  EXPECT_DEATH(scudo::reportPvallocOverflow(123), "Scudo ERROR.*123");
+  EXPECT_DEATH(scudo::reportInvalidAlignedAllocAlignment(123, 456),
+               "Scudo ERROR.*123.*456");
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/scudo_unit_test_main.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/scudo_unit_test_main.cc
new file mode 100644
index 0000000..16398e5
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/scudo_unit_test_main.cc
@@ -0,0 +1,14 @@
+//===-- scudo_unit_test_main.cc ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gtest/gtest.h"
+
+int main(int argc, char **argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/secondary_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/secondary_test.cc
new file mode 100644
index 0000000..09cd8a2
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/secondary_test.cc
@@ -0,0 +1,137 @@
+//===-- secondary_test.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "gtest/gtest.h"
+
+#include <stdio.h>
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+TEST(ScudoSecondaryTest, SecondaryBasic) {
+  scudo::GlobalStats S;
+  S.init();
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(&S);
+  const scudo::uptr Size = 1U << 16;
+  void *P = L->allocate(Size);
+  EXPECT_NE(P, nullptr);
+  memset(P, 'A', Size);
+  EXPECT_GE(scudo::MapAllocator::getBlockSize(P), Size);
+  L->deallocate(P);
+  EXPECT_DEATH(memset(P, 'A', Size), "");
+
+  const scudo::uptr Align = 1U << 16;
+  P = L->allocate(Size + Align, Align);
+  EXPECT_NE(P, nullptr);
+  void *AlignedP = reinterpret_cast<void *>(
+      scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+  memset(AlignedP, 'A', Size);
+  L->deallocate(P);
+
+  std::vector<void *> V;
+  for (scudo::uptr I = 0; I < 32U; I++)
+    V.push_back(L->allocate(Size));
+  std::random_shuffle(V.begin(), V.end());
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+  L->printStats();
+}
+
+// This exercises a variety of combinations of size and alignment for the
+// MapAllocator. The size computations done here mimic the ones done by the
+// combined allocator.
+TEST(ScudoSecondaryTest, SecondaryCombinations) {
+  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
+  constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
+    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
+         AlignLog++) {
+      const scudo::uptr Align = 1U << AlignLog;
+      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
+        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
+          continue;
+        const scudo::uptr UserSize =
+            scudo::roundUpTo((1U << SizeLog) + Delta, MinAlign);
+        const scudo::uptr Size =
+            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
+        void *P = L->allocate(Size, Align);
+        EXPECT_NE(P, nullptr);
+        void *AlignedP = reinterpret_cast<void *>(
+            scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+        memset(AlignedP, 0xff, UserSize);
+        L->deallocate(P);
+      }
+    }
+  }
+  L->printStats();
+}
+
+TEST(ScudoSecondaryTest, SecondaryIterate) {
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  std::vector<void *> V;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  for (scudo::uptr I = 0; I < 32U; I++)
+    V.push_back(L->allocate((std::rand() % 16) * PageSize));
+  auto Lambda = [V](scudo::uptr Block) {
+    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
+              V.end());
+  };
+  L->disable();
+  L->iterateOverBlocks(Lambda);
+  L->enable();
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+  L->printStats();
+}
+
+static std::mutex Mutex;
+static std::condition_variable Cv;
+static bool Ready = false;
+
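+// Worker routine: wait for the Ready signal, then allocate and free a batch of
+// randomly sized secondary blocks, racing against the other threads.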
+static void performAllocations(scudo::MapAllocator *L) {
+  std::vector<void *> V;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  for (scudo::uptr I = 0; I < 32U; I++)
+    V.push_back(L->allocate((std::rand() % 16) * PageSize));
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+}
+
+TEST(ScudoSecondaryTest, SecondaryThreadsRace) {
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  std::thread Threads[10];
+  for (scudo::uptr I = 0; I < 10U; I++)
+    Threads[I] = std::thread(performAllocations, L);
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+  L->printStats();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cc
new file mode 100644
index 0000000..d857aa4
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/size_class_map_test.cc
@@ -0,0 +1,38 @@
+//===-- size_class_map_test.cc ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "scudo/standalone/size_class_map.h"
+#include "gtest/gtest.h"
+
+template <class SizeClassMap> void testSizeClassMap() {
+  typedef SizeClassMap SCMap;
+  SCMap::print();
+  SCMap::validate();
+}
+
+TEST(ScudoSizeClassMapTest, DefaultSizeClassMap) {
+  testSizeClassMap<scudo::DefaultSizeClassMap>();
+}
+
+TEST(ScudoSizeClassMapTest, SvelteSizeClassMap) {
+  testSizeClassMap<scudo::SvelteSizeClassMap>();
+}
+
+TEST(ScudoSizeClassMapTest, AndroidSizeClassMap) {
+  testSizeClassMap<scudo::AndroidSizeClassMap>();
+}
+
+TEST(ScudoSizeClassMapTest, OneClassSizeClassMap) {
+  testSizeClassMap<scudo::SizeClassMap<1, 5, 5, 5, 0, 0>>();
+}
+
+#if SCUDO_CAN_USE_PRIMARY64
+TEST(ScudoSizeClassMapTest, LargeMaxSizeClassMap) {
+  testSizeClassMap<scudo::SizeClassMap<3, 4, 8, 63, 128, 16>>();
+}
+#endif
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/stats_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/stats_test.cc
new file mode 100644
index 0000000..9ed105d
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/stats_test.cc
@@ -0,0 +1,45 @@
+//===-- stats_test.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "scudo/standalone/stats.h"
+#include "gtest/gtest.h"
+
+TEST(ScudoStatsTest, LocalStats) {
+  scudo::LocalStats LStats;
+  LStats.init();
+  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
+    EXPECT_EQ(LStats.get(static_cast<scudo::StatType>(I)), 0U);
+  LStats.add(scudo::StatAllocated, 4096U);
+  EXPECT_EQ(LStats.get(scudo::StatAllocated), 4096U);
+  LStats.sub(scudo::StatAllocated, 4096U);
+  EXPECT_EQ(LStats.get(scudo::StatAllocated), 0U);
+  LStats.set(scudo::StatAllocated, 4096U);
+  EXPECT_EQ(LStats.get(scudo::StatAllocated), 4096U);
+}
+
+TEST(ScudoStatsTest, GlobalStats) {
+  scudo::GlobalStats GStats;
+  GStats.init();
+  scudo::uptr Counters[scudo::StatCount] = {};
+  GStats.get(Counters);
+  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
+    EXPECT_EQ(Counters[I], 0U);
+  scudo::LocalStats LStats;
+  LStats.init();
+  GStats.link(&LStats);
+  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
+    LStats.add(static_cast<scudo::StatType>(I), 4096U);
+  GStats.get(Counters);
+  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
+    EXPECT_EQ(Counters[I], 4096U);
+  // Unlinking the local stats moves their numbers to the global stats.
+  GStats.unlink(&LStats);
+  GStats.get(Counters);
+  for (scudo::uptr I = 0; I < scudo::StatCount; I++)
+    EXPECT_EQ(Counters[I], 4096U);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/strings_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/strings_test.cc
new file mode 100644
index 0000000..31e59c4
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/strings_test.cc
@@ -0,0 +1,98 @@
+//===-- strings_test.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "scudo/standalone/string_utils.h"
+#include "gtest/gtest.h"
+
+#include <limits.h>
+
+TEST(ScudoStringsTest, Basic) {
+  scudo::ScopedString Str(128);
+  Str.append("a%db%zdc%ue%zuf%xh%zxq%pe%sr", static_cast<int>(-1),
+             static_cast<scudo::uptr>(-2), static_cast<unsigned>(-4),
+             static_cast<scudo::uptr>(5), static_cast<unsigned>(10),
+             static_cast<scudo::uptr>(11), reinterpret_cast<void *>(0x123),
+             "_string_");
+  EXPECT_EQ(Str.length(), strlen(Str.data()));
+
+  std::string expectedString = "a-1b-2c4294967292e5fahbq0x";
+  expectedString += std::string(SCUDO_POINTER_FORMAT_LENGTH - 3, '0');
+  expectedString += "123e_string_r";
+  EXPECT_EQ(Str.length(), strlen(Str.data()));
+  EXPECT_STREQ(expectedString.c_str(), Str.data());
+}
+
+TEST(ScudoStringsTest, Precision) {
+  scudo::ScopedString Str(128);
+  Str.append("%.*s", 3, "12345");
+  EXPECT_EQ(Str.length(), strlen(Str.data()));
+  EXPECT_STREQ("123", Str.data());
+  Str.clear();
+  Str.append("%.*s", 6, "12345");
+  EXPECT_EQ(Str.length(), strlen(Str.data()));
+  EXPECT_STREQ("12345", Str.data());
+  Str.clear();
+  Str.append("%-6s", "12345");
+  EXPECT_EQ(Str.length(), strlen(Str.data()));
+  EXPECT_STREQ("12345 ", Str.data());
+}
+
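+// Appends Size single characters, growing the string one byte at a time.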
+static void fillString(scudo::ScopedString &Str, scudo::uptr Size) {
+  for (scudo::uptr I = 0; I < Size; I++)
+    Str.append("A");
+}
+
+TEST(ScudoStringsTest, PotentialOverflows) {
+  // Use a ScopedString that spans a page, and attempt to write past the end
+  // of it with variations of append. The expectation is for nothing to crash.
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  scudo::ScopedString Str(PageSize);
+  Str.clear();
+  fillString(Str, 2 * PageSize);
+  Str.clear();
+  fillString(Str, PageSize - 64);
+  Str.append("%-128s", "12345");
+  Str.clear();
+  fillString(Str, PageSize - 16);
+  Str.append("%024x", 12345);
+  Str.clear();
+  fillString(Str, PageSize - 16);
+  Str.append("EEEEEEEEEEEEEEEEEEEEEEEE");
+}
+
+template <typename T>
+static void testAgainstLibc(const char *Format, T Arg1, T Arg2) {
+  scudo::ScopedString Str(128);
+  Str.append(Format, Arg1, Arg2);
+  char Buffer[128];
+  snprintf(Buffer, sizeof(Buffer), Format, Arg1, Arg2);
+  EXPECT_EQ(Str.length(), strlen(Str.data()));
+  EXPECT_STREQ(Buffer, Str.data());
+}
+
+TEST(ScudoStringsTest, MinMax) {
+  testAgainstLibc<int>("%d-%d", INT_MIN, INT_MAX);
+  testAgainstLibc<unsigned>("%u-%u", 0, UINT_MAX);
+  testAgainstLibc<unsigned>("%x-%x", 0, UINT_MAX);
+  testAgainstLibc<long>("%zd-%zd", LONG_MIN, LONG_MAX);
+  testAgainstLibc<unsigned long>("%zu-%zu", 0, ULONG_MAX);
+  testAgainstLibc<unsigned long>("%zx-%zx", 0, ULONG_MAX);
+}
+
+TEST(ScudoStringsTest, Padding) {
+  testAgainstLibc<int>("%3d - %3d", 1, 0);
+  testAgainstLibc<int>("%3d - %3d", -1, 123);
+  testAgainstLibc<int>("%3d - %3d", -1, -123);
+  testAgainstLibc<int>("%3d - %3d", 12, 1234);
+  testAgainstLibc<int>("%3d - %3d", -12, -1234);
+  testAgainstLibc<int>("%03d - %03d", 1, 0);
+  testAgainstLibc<int>("%03d - %03d", -1, 123);
+  testAgainstLibc<int>("%03d - %03d", -1, -123);
+  testAgainstLibc<int>("%03d - %03d", 12, 1234);
+  testAgainstLibc<int>("%03d - %03d", -12, -1234);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/tsd_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/tsd_test.cc
new file mode 100644
index 0000000..9ab1010
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/tsd_test.cc
@@ -0,0 +1,168 @@
+//===-- tsd_test.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsd_exclusive.h"
+#include "tsd_shared.h"
+
+#include "gtest/gtest.h"
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+// We mock out an allocator with a TSD registry, mostly using empty stubs. The
+// cache contains a single volatile uptr, used to check that several concurrent
+// threads never access or modify the same cache at the same time.
+template <class Config> class MockAllocator {
+public:
+  using ThisT = MockAllocator<Config>;
+  using TSDRegistryT = typename Config::template TSDRegistryT<ThisT>;
+  using CacheT = struct MockCache { volatile scudo::uptr Canary; };
+  using QuarantineCacheT = struct MockQuarantine {};
+
+  void initLinkerInitialized() {
+    // This should only be called once by the registry.
+    EXPECT_FALSE(Initialized);
+    Initialized = true;
+  }
+  void reset() { memset(this, 0, sizeof(*this)); }
+
+  void unmapTestOnly() { TSDRegistry.unmapTestOnly(); }
+  void initCache(CacheT *Cache) { memset(Cache, 0, sizeof(*Cache)); }
+  void commitBack(scudo::TSD<MockAllocator> *TSD) {}
+  TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+
+  bool isInitialized() { return Initialized; }
+
+private:
+  bool Initialized;
+  TSDRegistryT TSDRegistry;
+};
+
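+// The three TSD configurations exercised below: a single shared TSD, a pool of
+// 16 shared TSDs, and one exclusive TSD per thread.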
+struct OneCache {
+  template <class Allocator>
+  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 1U>;
+};
+
+struct SharedCaches {
+  template <class Allocator>
+  using TSDRegistryT = scudo::TSDRegistrySharedT<Allocator, 16U>;
+};
+
+struct ExclusiveCaches {
+  template <class Allocator>
+  using TSDRegistryT = scudo::TSDRegistryExT<Allocator>;
+};
+
+TEST(ScudoTSDTest, TSDRegistryInit) {
+  using AllocatorT = MockAllocator<OneCache>;
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+  EXPECT_FALSE(Allocator->isInitialized());
+
+  auto Registry = Allocator->getTSDRegistry();
+  Registry->initLinkerInitialized(Allocator.get());
+  EXPECT_TRUE(Allocator->isInitialized());
+}
+
+template <class AllocatorT> static void testRegistry() {
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+  EXPECT_FALSE(Allocator->isInitialized());
+
+  auto Registry = Allocator->getTSDRegistry();
+  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
+  EXPECT_TRUE(Allocator->isInitialized());
+
+  bool UnlockRequired;
+  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+  EXPECT_NE(TSD, nullptr);
+  EXPECT_EQ(TSD->Cache.Canary, 0U);
+  if (UnlockRequired)
+    TSD->unlock();
+
+  Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
+  TSD = Registry->getTSDAndLock(&UnlockRequired);
+  EXPECT_NE(TSD, nullptr);
+  EXPECT_EQ(TSD->Cache.Canary, 0U);
+  memset(&TSD->Cache, 0x42, sizeof(TSD->Cache));
+  if (UnlockRequired)
+    TSD->unlock();
+}
+
+TEST(ScudoTSDTest, TSDRegistryBasic) {
+  testRegistry<MockAllocator<OneCache>>();
+  testRegistry<MockAllocator<SharedCaches>>();
+  testRegistry<MockAllocator<ExclusiveCaches>>();
+}
+
+static std::mutex Mutex;
+static std::condition_variable Cv;
+static bool Ready = false;
+
+template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
+  auto Registry = Allocator->getTSDRegistry();
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
+  bool UnlockRequired;
+  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+  EXPECT_NE(TSD, nullptr);
+  // For an exclusive TSD, the cache should be empty. We cannot guarantee the
+  // same for a shared TSD.
+  if (!UnlockRequired)
+    EXPECT_EQ(TSD->Cache.Canary, 0U);
+  // Transform the thread id to a uptr to use it as canary.
+  const scudo::uptr Canary = static_cast<scudo::uptr>(
+      std::hash<std::thread::id>{}(std::this_thread::get_id()));
+  TSD->Cache.Canary = Canary;
+  // Loop a few times to make sure that a concurrent thread isn't modifying it.
+  for (scudo::uptr I = 0; I < 4096U; I++)
+    EXPECT_EQ(TSD->Cache.Canary, Canary);
+  if (UnlockRequired)
+    TSD->unlock();
+}
+
+template <class AllocatorT> static void testRegistryThreaded() {
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+  std::thread Threads[32];
+  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
+    Threads[I] = std::thread(stressCache<AllocatorT>, Allocator.get());
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+}
+
+TEST(ScudoTSDTest, TSDRegistryThreaded) {
+  testRegistryThreaded<MockAllocator<OneCache>>();
+  testRegistryThreaded<MockAllocator<SharedCaches>>();
+  testRegistryThreaded<MockAllocator<ExclusiveCaches>>();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/vector_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/vector_test.cc
new file mode 100644
index 0000000..ebfcc43
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/vector_test.cc
@@ -0,0 +1,43 @@
+//===-- vector_test.cc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "vector.h"
+
+#include "gtest/gtest.h"
+
+TEST(ScudoVectorTest, Basic) {
+  scudo::Vector<int> V;
+  EXPECT_EQ(V.size(), 0U);
+  V.push_back(42);
+  EXPECT_EQ(V.size(), 1U);
+  EXPECT_EQ(V[0], 42);
+  V.push_back(43);
+  EXPECT_EQ(V.size(), 2U);
+  EXPECT_EQ(V[0], 42);
+  EXPECT_EQ(V[1], 43);
+}
+
+TEST(ScudoVectorTest, Stride) {
+  scudo::Vector<int> V;
+  for (int i = 0; i < 1000; i++) {
+    V.push_back(i);
+    EXPECT_EQ(V.size(), i + 1U);
+    EXPECT_EQ(V[i], i);
+  }
+  for (int i = 0; i < 1000; i++)
+    EXPECT_EQ(V[i], i);
+}
+
+TEST(ScudoVectorTest, ResizeReduction) {
+  scudo::Vector<int> V;
+  V.push_back(0);
+  V.push_back(0);
+  EXPECT_EQ(V.size(), 2U);
+  V.resize(1);
+  EXPECT_EQ(V.size(), 1U);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cc
new file mode 100644
index 0000000..a5ba806
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cc
@@ -0,0 +1,225 @@
+//===-- wrappers_c_test.cc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#include "gtest/gtest.h"
+
+#include <errno.h>
+#include <limits.h>
+#include <malloc.h>
+#include <unistd.h>
+
+// Note that every C allocation function in the test binary will be fulfilled
+// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
+// But this might also lead to unexpected side-effects, since the allocation and
+// deallocation operations in the TEST functions will coexist with others (see
+// the EXPECT_DEATH comment below).
+
+// We have to use a small quarantine to make sure that our double-free tests
+// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
+// freed (this depends on the size obviously) and the following free succeeds.
+extern "C" __attribute__((visibility("default"))) const char *
+__scudo_default_options() {
+  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
+         "quarantine_max_chunk_size=512";
+}
+
+static const size_t Size = 100U;
+
+TEST(ScudoWrappersCTest, Malloc) {
+  void *P = malloc(Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size, malloc_usable_size(P));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);
+  EXPECT_DEATH(
+      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
+  free(P);
+  EXPECT_DEATH(free(P), "");
+
+  P = malloc(0U);
+  EXPECT_NE(P, nullptr);
+  free(P);
+
+  errno = 0;
+  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
+  EXPECT_EQ(errno, ENOMEM);
+}
+
+TEST(ScudoWrappersCTest, Calloc) {
+  void *P = calloc(1U, Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size, malloc_usable_size(P));
+  for (size_t I = 0; I < Size; I++)
+    EXPECT_EQ((reinterpret_cast<uint8_t *>(P))[I], 0U);
+  free(P);
+
+  P = calloc(1U, 0U);
+  EXPECT_NE(P, nullptr);
+  free(P);
+  P = calloc(0U, 1U);
+  EXPECT_NE(P, nullptr);
+  free(P);
+
+  errno = 0;
+  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
+  EXPECT_EQ(errno, ENOMEM);
+  errno = 0;
+  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
+  if (SCUDO_ANDROID)
+    EXPECT_EQ(errno, ENOMEM);
+  errno = 0;
+  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
+  EXPECT_EQ(errno, ENOMEM);
+}
+
+TEST(ScudoWrappersCTest, Memalign) {
+  void *P;
+  for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) {
+    const size_t Alignment = 1U << I;
+
+    P = memalign(Alignment, Size);
+    EXPECT_NE(P, nullptr);
+    EXPECT_LE(Size, malloc_usable_size(P));
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
+    free(P);
+
+    P = nullptr;
+    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
+    EXPECT_NE(P, nullptr);
+    EXPECT_LE(Size, malloc_usable_size(P));
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
+    free(P);
+  }
+
+  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
+  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
+  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);
+
+  // Android's memalign accepts non-power-of-2 alignments, as well as 0.
+  if (SCUDO_ANDROID) {
+    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
+      P = memalign(Alignment, 1024U);
+      EXPECT_NE(P, nullptr);
+      free(P);
+    }
+  }
+}
+
+TEST(ScudoWrappersCTest, AlignedAlloc) {
+  const size_t Alignment = 4096U;
+  void *P = aligned_alloc(Alignment, Alignment * 4U);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
+  free(P);
+
+  errno = 0;
+  P = aligned_alloc(Alignment, Size);
+  EXPECT_EQ(P, nullptr);
+  EXPECT_EQ(errno, EINVAL);
+}
+
+TEST(ScudoWrappersCTest, Realloc) {
+  // realloc(nullptr, N) behaves like malloc(N).
+  void *P = realloc(nullptr, 0U);
+  EXPECT_NE(P, nullptr);
+  free(P);
+
+  P = malloc(Size);
+  EXPECT_NE(P, nullptr);
+  // realloc(P, 0U) acts as free(P) and returns nullptr.
+  EXPECT_EQ(realloc(P, 0U), nullptr);
+
+  P = malloc(Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size, malloc_usable_size(P));
+  memset(P, 0x42, Size);
+
+  P = realloc(P, Size * 2U);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size * 2U, malloc_usable_size(P));
+  for (size_t I = 0; I < Size; I++)
+    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
+
+  P = realloc(P, Size / 2U);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size / 2U, malloc_usable_size(P));
+  for (size_t I = 0; I < Size / 2U; I++)
+    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
+  free(P);
+
+  EXPECT_DEATH(P = realloc(P, Size), "");
+
+  errno = 0;
+  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
+  EXPECT_EQ(errno, ENOMEM);
+  P = malloc(Size);
+  EXPECT_NE(P, nullptr);
+  errno = 0;
+  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
+  EXPECT_EQ(errno, ENOMEM);
+  free(P);
+
+  // Android allows realloc of memalign pointers.
+  if (SCUDO_ANDROID) {
+    const size_t Alignment = 1024U;
+    P = memalign(Alignment, Size);
+    EXPECT_NE(P, nullptr);
+    EXPECT_LE(Size, malloc_usable_size(P));
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
+    memset(P, 0x42, Size);
+
+    P = realloc(P, Size * 2U);
+    EXPECT_NE(P, nullptr);
+    EXPECT_LE(Size * 2U, malloc_usable_size(P));
+    for (size_t I = 0; I < Size; I++)
+      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
+    free(P);
+  }
+}
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+TEST(ScudoWrappersCTest, Mallopt) {
+  errno = 0;
+  EXPECT_EQ(mallopt(-1000, 1), 0);
+  // mallopt doesn't set errno.
+  EXPECT_EQ(errno, 0);
+
+  EXPECT_EQ(mallopt(M_PURGE, 0), 1);
+
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
+}
+
+TEST(ScudoWrappersCTest, OtherAlloc) {
+  const size_t PageSize = sysconf(_SC_PAGESIZE);
+
+  void *P = pvalloc(Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
+  EXPECT_LE(PageSize, malloc_usable_size(P));
+  free(P);
+
+  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);
+
+  P = pvalloc(Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
+  free(P);
+
+  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/wrappers_cpp_test.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/wrappers_cpp_test.cc
new file mode 100644
index 0000000..766c13f
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tests/wrappers_cpp_test.cc
@@ -0,0 +1,120 @@
+//===-- wrappers_cpp_test.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gtest/gtest.h"
+
+#include <condition_variable>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+void operator delete(void *, size_t) noexcept;
+void operator delete[](void *, size_t) noexcept;
+
+// Note that every C++ allocation function in the test binary will be fulfilled
+// by Scudo. See the comment in the C counterpart of this file.
+
+extern "C" __attribute__((visibility("default"))) const char *
+__scudo_default_options() {
+  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
+         "quarantine_max_chunk_size=512:dealloc_type_mismatch=true";
+}
+
+template <typename T> static void testCxxNew() {
+  T *P = new T;
+  EXPECT_NE(P, nullptr);
+  memset(P, 0x42, sizeof(T));
+  EXPECT_DEATH(delete[] P, "");
+  delete P;
+  EXPECT_DEATH(delete P, "");
+
+  P = new T;
+  EXPECT_NE(P, nullptr);
+  memset(P, 0x42, sizeof(T));
+  operator delete(P, sizeof(T));
+
+  P = new (std::nothrow) T;
+  EXPECT_NE(P, nullptr);
+  memset(P, 0x42, sizeof(T));
+  delete P;
+
+  const size_t N = 16U;
+  T *A = new T[N];
+  EXPECT_NE(A, nullptr);
+  memset(A, 0x42, sizeof(T) * N);
+  EXPECT_DEATH(delete A, "");
+  delete[] A;
+  EXPECT_DEATH(delete[] A, "");
+
+  A = new T[N];
+  EXPECT_NE(A, nullptr);
+  memset(A, 0x42, sizeof(T) * N);
+  operator delete[](A, sizeof(T) * N);
+
+  A = new (std::nothrow) T[N];
+  EXPECT_NE(A, nullptr);
+  memset(A, 0x42, sizeof(T) * N);
+  delete[] A;
+}
+
+class Pixel {
+public:
+  enum class Color { Red, Green, Blue };
+  int X = 0;
+  int Y = 0;
+  Color C = Color::Red;
+};
+
+TEST(ScudoWrappersCppTest, New) {
+  testCxxNew<bool>();
+  testCxxNew<uint8_t>();
+  testCxxNew<uint16_t>();
+  testCxxNew<uint32_t>();
+  testCxxNew<uint64_t>();
+  testCxxNew<float>();
+  testCxxNew<double>();
+  testCxxNew<long double>();
+  testCxxNew<Pixel>();
+}
+
+static std::mutex Mutex;
+static std::condition_variable Cv;
+static bool Ready = false;
+
+static void stressNew() {
+  std::vector<uintptr_t *> V;
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  for (size_t I = 0; I < 256U; I++) {
+    const size_t N = std::rand() % 128U;
+    uintptr_t *P = new uintptr_t[N];
+    if (P) {
+      memset(P, 0x42, sizeof(uintptr_t) * N);
+      V.push_back(P);
+    }
+  }
+  while (!V.empty()) {
+    delete[] V.back();
+    V.pop_back();
+  }
+}
+
+TEST(ScudoWrappersCppTest, ThreadedNew) {
+  std::thread Threads[32];
+  for (size_t I = 0U; I < sizeof(Threads) / sizeof(Threads[0]); I++)
+    Threads[I] = std::thread(stressNew);
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
new file mode 100644
index 0000000..f24ff01
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
@@ -0,0 +1,66 @@
+//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
+
+// With some build setups, this might still not be defined.
+#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
+#define PTHREAD_DESTRUCTOR_ITERATIONS 4
+#endif
+
+namespace scudo {
+
+template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
+  typename Allocator::CacheT Cache;
+  typename Allocator::QuarantineCacheT QuarantineCache;
+  u8 DestructorIterations;
+
+  void initLinkerInitialized(Allocator *Instance) {
+    Instance->initCache(&Cache);
+    DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
+  }
+  void init(Allocator *Instance) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(Instance);
+  }
+
+  void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+  INLINE bool tryLock() {
+    if (Mutex.tryLock()) {
+      atomic_store_relaxed(&Precedence, 0);
+      return true;
+    }
+    if (atomic_load_relaxed(&Precedence) == 0)
+      atomic_store_relaxed(
+          &Precedence,
+          static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
+    return false;
+  }
+  INLINE void lock() {
+    atomic_store_relaxed(&Precedence, 0);
+    Mutex.lock();
+  }
+  INLINE void unlock() { Mutex.unlock(); }
+  INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+private:
+  HybridMutex Mutex;
+  atomic_uptr Precedence;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_H_
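
Editor's note: tryLock() above stamps a monotonic-time Precedence on contention so that the shared TSD registry (tsd_shared.h, added later in this change) can prefer the context that has been waiting the longest. Below is a minimal, self-contained sketch of that idea; it is not part of this import, std::mutex and std::chrono::steady_clock merely stand in for Scudo's HybridMutex and getMonotonicTime(), and Slot is a made-up name.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>

struct Slot {
  std::mutex M;
  std::atomic<uint64_t> Precedence{0};

  bool tryLock() {
    if (M.try_lock()) {
      // The new owner clears the precedence, as in TSD::tryLock().
      Precedence.store(0, std::memory_order_relaxed);
      return true;
    }
    // A contender stamps the time it first started waiting, but only if no
    // earlier contender already did.
    uint64_t Expected = 0;
    const uint64_t Now = static_cast<uint64_t>(
        std::chrono::steady_clock::now().time_since_epoch().count());
    Precedence.compare_exchange_strong(Expected, Now,
                                       std::memory_order_relaxed);
    return false;
  }
};

int main() {
  Slot S;
  S.M.lock(); // Simulate another thread currently owning the slot.
  std::thread Contender([&S] {
    if (!S.tryLock()) // Fails, and therefore stamps S.Precedence.
      std::printf("contended, precedence=%llu\n",
                  static_cast<unsigned long long>(
                      S.Precedence.load(std::memory_order_relaxed)));
  });
  Contender.join();
  S.M.unlock();
  return 0;
}

The only property the registry relies on is that a zero Precedence means "recently locked", while non-zero values order the waiters by how long they have been blocked.
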
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
new file mode 100644
index 0000000..18cce1c
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -0,0 +1,118 @@
+//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_EXCLUSIVE_H_
+#define SCUDO_TSD_EXCLUSIVE_H_
+
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+enum class ThreadState : u8 {
+  NotInitialized = 0,
+  Initialized,
+  TornDown,
+};
+
+template <class Allocator> void teardownThread(void *Ptr);
+
+template <class Allocator> struct TSDRegistryExT {
+  void initLinkerInitialized(Allocator *Instance) {
+    Instance->initLinkerInitialized();
+    CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
+    FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
+        map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
+    FallbackTSD->initLinkerInitialized(Instance);
+    Initialized = true;
+  }
+  void init(Allocator *Instance) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(Instance);
+  }
+
+  void unmapTestOnly() {
+    unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
+  }
+
+  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
+    if (LIKELY(State != ThreadState::NotInitialized))
+      return;
+    initThread(Instance, MinimalInit);
+  }
+
+  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+    if (LIKELY(State == ThreadState::Initialized)) {
+      *UnlockRequired = false;
+      return &ThreadTSD;
+    }
+    DCHECK(FallbackTSD);
+    FallbackTSD->lock();
+    *UnlockRequired = true;
+    return FallbackTSD;
+  }
+
+private:
+  void initOnceMaybe(Allocator *Instance) {
+    ScopedLock L(Mutex);
+    if (Initialized)
+      return;
+    initLinkerInitialized(Instance); // Sets Initialized.
+  }
+
+  // Using minimal initialization allows for global initialization while
+  // keeping the thread-specific structure untouched. The fallback structure
+  // will be used instead.
+  NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
+    initOnceMaybe(Instance);
+    if (MinimalInit)
+      return;
+    CHECK_EQ(
+        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
+    ThreadTSD.initLinkerInitialized(Instance);
+    State = ThreadState::Initialized;
+  }
+
+  pthread_key_t PThreadKey;
+  bool Initialized;
+  TSD<Allocator> *FallbackTSD;
+  HybridMutex Mutex;
+  static THREADLOCAL ThreadState State;
+  static THREADLOCAL TSD<Allocator> ThreadTSD;
+
+  friend void teardownThread<Allocator>(void *Ptr);
+};
+
+template <class Allocator>
+THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
+template <class Allocator>
+THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;
+
+template <class Allocator> void teardownThread(void *Ptr) {
+  typedef TSDRegistryExT<Allocator> TSDRegistryT;
+  Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
+  // The glibc POSIX thread-local-storage deallocation routine calls
+  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
+  // We want to be called last since other destructors might call free and the
+  // like, so we defer until the last of those iterations before draining the
+  // quarantine and committing back the cache.
+  if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
+    TSDRegistryT::ThreadTSD.DestructorIterations--;
+    // If pthread_setspecific fails, we will go ahead with the teardown.
+    if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
+                                   Ptr) == 0))
+      return;
+  }
+  TSDRegistryT::ThreadTSD.commitBack(Instance);
+  TSDRegistryT::State = ThreadState::TornDown;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_EXCLUSIVE_H_
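
Editor's note: the re-registration trick in teardownThread() above can be illustrated with a small standalone program (not part of this import; Key, IterationsLeft and teardown are made-up names). A pthread key destructor that re-installs its value is called again on the next destructor round, so a destructor can arrange to run after most other keys' destructors, which is exactly how the TSD defers its final commitBack().

#include <pthread.h>

#include <cstdio>

static pthread_key_t Key;
// Per-thread countdown playing the role of TSD::DestructorIterations.
static thread_local int IterationsLeft = 4;

static void teardown(void *Ptr) {
  if (IterationsLeft > 1) {
    IterationsLeft--;
    // Re-registering the value makes the destructor run again on the next
    // round, letting other keys' destructors (which may still call free and
    // the like) run before our final pass.
    if (pthread_setspecific(Key, Ptr) == 0)
      return;
  }
  std::printf("final teardown on thread exit\n");
}

static void *threadMain(void *) {
  // A non-null value arms the destructor for this thread.
  pthread_setspecific(Key, reinterpret_cast<void *>(1));
  return nullptr;
}

int main() {
  pthread_key_create(&Key, teardown);
  pthread_t T;
  pthread_create(&T, nullptr, threadMain, nullptr);
  pthread_join(T, nullptr);
  pthread_key_delete(Key);
  return 0;
}

With glibc's PTHREAD_DESTRUCTOR_ITERATIONS of 4, the destructor runs three quick rounds and does its real work on the fourth.
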
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
new file mode 100644
index 0000000..0f0a83a
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -0,0 +1,169 @@
+//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_SHARED_H_
+#define SCUDO_TSD_SHARED_H_
+
+#include "linux.h" // for getAndroidTlsPtr()
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
+  void initLinkerInitialized(Allocator *Instance) {
+    Instance->initLinkerInitialized();
+    CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
+    NumberOfTSDs = Min(Max(1U, getNumberOfCPUs()), MaxTSDCount);
+    TSDs = reinterpret_cast<TSD<Allocator> *>(
+        map(nullptr, sizeof(TSD<Allocator>) * NumberOfTSDs, "scudo:tsd"));
+    for (u32 I = 0; I < NumberOfTSDs; I++)
+      TSDs[I].initLinkerInitialized(Instance);
+    // Compute the values in [1, NumberOfTSDs] coprime with NumberOfTSDs; they
+    // serve as strides to walk the array of TSDs in a random order. See:
+    // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
+    for (u32 I = 0; I < NumberOfTSDs; I++) {
+      u32 A = I + 1;
+      u32 B = NumberOfTSDs;
+      // Find the GCD of I + 1 and NumberOfTSDs. If it is 1, they are coprime.
+      while (B != 0) {
+        const u32 T = A;
+        A = B;
+        B = T % B;
+      }
+      if (A == 1)
+        CoPrimes[NumberOfCoPrimes++] = I + 1;
+    }
+    Initialized = true;
+  }
+  void init(Allocator *Instance) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(Instance);
+  }
+
+  void unmapTestOnly() {
+    unmap(reinterpret_cast<void *>(TSDs),
+          sizeof(TSD<Allocator>) * NumberOfTSDs);
+  }
+
+  ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
+                                     UNUSED bool MinimalInit) {
+    if (LIKELY(getCurrentTSD()))
+      return;
+    initThread(Instance);
+  }
+
+  ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+    TSD<Allocator> *TSD = getCurrentTSD();
+    DCHECK(TSD);
+    *UnlockRequired = true;
+    // Try to lock the currently associated context.
+    if (TSD->tryLock())
+      return TSD;
+    // If that fails, go down the slow path.
+    return getTSDAndLockSlow(TSD);
+  }
+
+private:
+  ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
+#if SCUDO_ANDROID
+    *getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
+#elif SCUDO_LINUX
+    ThreadTSD = CurrentTSD;
+#else
+    CHECK_EQ(
+        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(CurrentTSD)),
+        0);
+#endif
+  }
+
+  ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
+#if SCUDO_ANDROID
+    return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
+#elif SCUDO_LINUX
+    return ThreadTSD;
+#else
+    return reinterpret_cast<TSD<Allocator> *>(pthread_getspecific(PThreadKey));
+#endif
+  }
+
+  void initOnceMaybe(Allocator *Instance) {
+    ScopedLock L(Mutex);
+    if (Initialized)
+      return;
+    initLinkerInitialized(Instance); // Sets Initialized.
+  }
+
+  NOINLINE void initThread(Allocator *Instance) {
+    initOnceMaybe(Instance);
+    // Initial context assignment is done in a plain round-robin fashion.
+    const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
+    setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+  }
+
+  NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
+    if (MaxTSDCount > 1U && NumberOfTSDs > 1U) {
+      // Use the Precedence of the current TSD as our random seed. Since we are
+      // in the slow path, it means that tryLock failed, and as a result it's
+      // very likely that said Precedence is non-zero.
+      u32 RandState = static_cast<u32>(CurrentTSD->getPrecedence());
+      const u32 R = getRandomU32(&RandState);
+      const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+      u32 Index = R % NumberOfTSDs;
+      uptr LowestPrecedence = UINTPTR_MAX;
+      TSD<Allocator> *CandidateTSD = nullptr;
+      // Go randomly through at most 4 contexts and find a candidate.
+      for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+        if (TSDs[Index].tryLock()) {
+          setCurrentTSD(&TSDs[Index]);
+          return &TSDs[Index];
+        }
+        const uptr Precedence = TSDs[Index].getPrecedence();
+        // A 0 precedence here means another thread just locked this TSD.
+        if (Precedence && Precedence < LowestPrecedence) {
+          CandidateTSD = &TSDs[Index];
+          LowestPrecedence = Precedence;
+        }
+        Index += Inc;
+        if (Index >= NumberOfTSDs)
+          Index -= NumberOfTSDs;
+      }
+      if (CandidateTSD) {
+        CandidateTSD->lock();
+        setCurrentTSD(CandidateTSD);
+        return CandidateTSD;
+      }
+    }
+    // Last resort, stick with the current one.
+    CurrentTSD->lock();
+    return CurrentTSD;
+  }
+
+  pthread_key_t PThreadKey;
+  atomic_u32 CurrentIndex;
+  u32 NumberOfTSDs;
+  TSD<Allocator> *TSDs;
+  u32 NumberOfCoPrimes;
+  u32 CoPrimes[MaxTSDCount];
+  bool Initialized;
+  HybridMutex Mutex;
+#if SCUDO_LINUX && !SCUDO_ANDROID
+  static THREADLOCAL TSD<Allocator> *ThreadTSD;
+#endif
+};
+
+#if SCUDO_LINUX && !SCUDO_ANDROID
+template <class Allocator, u32 MaxTSDCount>
+THREADLOCAL TSD<Allocator>
+    *TSDRegistrySharedT<Allocator, MaxTSDCount>::ThreadTSD;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_SHARED_H_
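
Editor's note: the coprime walk relied on by getTSDAndLockSlow() above is easy to check in isolation. The following standalone sketch (not part of this import; N, the stride choice and the starting index are arbitrary) computes the coprime strides the same way initLinkerInitialized() does and shows that stepping by one of them visits every index exactly once before repeating.

#include <cstdio>
#include <vector>

// Greatest common divisor, computed as in initLinkerInitialized() above.
static unsigned gcd(unsigned A, unsigned B) {
  while (B != 0) {
    const unsigned T = A;
    A = B;
    B = T % B;
  }
  return A;
}

int main() {
  const unsigned N = 6; // Stand-in for NumberOfTSDs.
  std::vector<unsigned> CoPrimes;
  for (unsigned I = 1; I <= N; I++)
    if (gcd(I, N) == 1)
      CoPrimes.push_back(I); // {1, 5} for N == 6.

  // Walking the array with a coprime stride visits every index exactly once
  // before repeating, which is what getTSDAndLockSlow() relies on.
  const unsigned Inc = CoPrimes.back(); // 5
  unsigned Index = 2;                   // Arbitrary random start.
  for (unsigned I = 0; I < N; I++) {
    std::printf("%u ", Index);
    Index += Inc;
    if (Index >= N)
      Index -= N;
  }
  std::printf("\n"); // Prints a permutation of 0..5, here "2 1 0 5 4 3".
  return 0;
}
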
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/vector.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
new file mode 100644
index 0000000..3cb4005
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
@@ -0,0 +1,118 @@
+//===-- vector.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_VECTOR_H_
+#define SCUDO_VECTOR_H_
+
+#include "common.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// A low-level vector based on map(). May incur a significant memory overhead
+// for small vectors. The current implementation supports only POD types.
+template <typename T> class VectorNoCtor {
+public:
+  void init(uptr InitialCapacity) {
+    CapacityBytes = 0;
+    Size = 0;
+    Data = nullptr;
+    reserve(InitialCapacity);
+  }
+  void destroy() {
+    if (Data)
+      unmap(Data, CapacityBytes);
+  }
+  T &operator[](uptr I) {
+    DCHECK_LT(I, Size);
+    return Data[I];
+  }
+  const T &operator[](uptr I) const {
+    DCHECK_LT(I, Size);
+    return Data[I];
+  }
+  void push_back(const T &Element) {
+    DCHECK_LE(Size, capacity());
+    if (Size == capacity()) {
+      const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+      reallocate(NewCapacity);
+    }
+    memcpy(&Data[Size++], &Element, sizeof(T));
+  }
+  T &back() {
+    DCHECK_GT(Size, 0);
+    return Data[Size - 1];
+  }
+  void pop_back() {
+    DCHECK_GT(Size, 0);
+    Size--;
+  }
+  uptr size() const { return Size; }
+  const T *data() const { return Data; }
+  T *data() { return Data; }
+  uptr capacity() const { return CapacityBytes / sizeof(T); }
+  void reserve(uptr NewSize) {
+    // Never downsize internal buffer.
+    if (NewSize > capacity())
+      reallocate(NewSize);
+  }
+  void resize(uptr NewSize) {
+    if (NewSize > Size) {
+      reserve(NewSize);
+      memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
+    }
+    Size = NewSize;
+  }
+
+  void clear() { Size = 0; }
+  bool empty() const { return size() == 0; }
+
+  const T *begin() const { return data(); }
+  T *begin() { return data(); }
+  const T *end() const { return data() + size(); }
+  T *end() { return data() + size(); }
+
+private:
+  void reallocate(uptr NewCapacity) {
+    DCHECK_GT(NewCapacity, 0);
+    DCHECK_LE(Size, NewCapacity);
+    const uptr NewCapacityBytes =
+        roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+    T *NewData =
+        reinterpret_cast<T *>(map(nullptr, NewCapacityBytes, "scudo:vector"));
+    if (Data) {
+      memcpy(NewData, Data, Size * sizeof(T));
+      unmap(Data, CapacityBytes);
+    }
+    Data = NewData;
+    CapacityBytes = NewCapacityBytes;
+  }
+
+  T *Data;
+  uptr CapacityBytes;
+  uptr Size;
+};
+
+template <typename T> class Vector : public VectorNoCtor<T> {
+public:
+  Vector() { VectorNoCtor<T>::init(1); }
+  explicit Vector(uptr Count) {
+    VectorNoCtor<T>::init(Count);
+    this->resize(Count);
+  }
+  ~Vector() { VectorNoCtor<T>::destroy(); }
+  // Disallow copies and moves.
+  Vector(const Vector &) = delete;
+  Vector &operator=(const Vector &) = delete;
+  Vector(Vector &&) = delete;
+  Vector &operator=(Vector &&) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_VECTOR_H_
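
Editor's note: a usage sketch for the class above, written in the gtest style of the other tests in this change. It assumes it is compiled inside the scudo standalone tree (so that common.h and the platform map()/unmap() primitives resolve); the test name is made up and an equivalent test may already exist elsewhere in the tree.

#include "vector.h"

#include "gtest/gtest.h"

TEST(ScudoVectorTest, PushPop) {
  scudo::Vector<scudo::uptr> V;
  for (scudo::uptr I = 0; I < 100U; I++)
    V.push_back(I); // Grows by mapping a larger buffer and copying over.
  EXPECT_EQ(V.size(), 100U);
  EXPECT_EQ(V.back(), 99U);
  V.pop_back();
  EXPECT_EQ(V.size(), 99U);
  V.clear();
  EXPECT_TRUE(V.empty());
}
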
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cc
new file mode 100644
index 0000000..5908c60
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cc
@@ -0,0 +1,39 @@
+//===-- wrappers_c.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::Config> Allocator;
+// Pointer to the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separate heap for C & C++ but in
+// reality the amount of cross-pollination between the two is staggering.
+scudo::Allocator<scudo::Config> *AllocatorPtr = &Allocator;
+
+extern "C" {
+
+#define SCUDO_PREFIX(name) name
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+} // extern "C"
+
+#endif // !SCUDO_ANDROID || !_BIONIC
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
new file mode 100644
index 0000000..33a0c53
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
@@ -0,0 +1,52 @@
+//===-- wrappers_c.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_WRAPPERS_C_H_
+#define SCUDO_WRAPPERS_C_H_
+
+#include "platform.h"
+#include "stats.h"
+
+// Bionic's struct mallinfo consists of size_t fields (mallinfo(3) uses int).
+#if SCUDO_ANDROID
+typedef size_t __scudo_mallinfo_data_t;
+#else
+typedef int __scudo_mallinfo_data_t;
+#endif
+
+struct __scudo_mallinfo {
+  __scudo_mallinfo_data_t arena;
+  __scudo_mallinfo_data_t ordblks;
+  __scudo_mallinfo_data_t smblks;
+  __scudo_mallinfo_data_t hblks;
+  __scudo_mallinfo_data_t hblkhd;
+  __scudo_mallinfo_data_t usmblks;
+  __scudo_mallinfo_data_t fsmblks;
+  __scudo_mallinfo_data_t uordblks;
+  __scudo_mallinfo_data_t fordblks;
+  __scudo_mallinfo_data_t keepcost;
+};
+
+// Android sometimes includes malloc.h no matter what, which leads to
+// conflicting return types for mallinfo() if we use our own structure. So if
+// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
+#if STRUCT_MALLINFO_DECLARED
+#define SCUDO_MALLINFO mallinfo
+#else
+#define SCUDO_MALLINFO __scudo_mallinfo
+#endif
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+#endif // SCUDO_WRAPPERS_C_H_
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
new file mode 100644
index 0000000..2beddc7
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -0,0 +1,176 @@
+//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PREFIX
+#error "Define SCUDO_PREFIX prior to including this file!"
+#endif
+
+// malloc-type functions have to return memory aligned to std::max_align_t.
+// This is distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type
+// functions do not have to abide by the same requirement.
+#ifndef SCUDO_MALLOC_ALIGNMENT
+#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
+#endif
+
+INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
+  scudo::uptr Product;
+  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
+    if (SCUDO_ALLOCATOR.canReturnNull()) {
+      errno = ENOMEM;
+      return nullptr;
+    }
+    scudo::reportCallocOverflow(nmemb, size);
+  }
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+}
+
+INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
+  struct SCUDO_MALLINFO Info = {};
+  scudo::StatCounters Stats;
+  SCUDO_ALLOCATOR.getStats(Stats);
+  Info.uordblks =
+      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
+  return Info;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+}
+
+#if SCUDO_ANDROID
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
+#else
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
+#endif
+  return SCUDO_ALLOCATOR.getUsableSize(ptr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
+  // Android rounds up the alignment to a power of two if it isn't one.
+  if (SCUDO_ANDROID) {
+    if (UNLIKELY(!alignment)) {
+      alignment = 1U;
+    } else {
+      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
+        alignment = scudo::roundUpToPowerOfTwo(alignment);
+    }
+  } else {
+    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
+      if (SCUDO_ALLOCATOR.canReturnNull()) {
+        errno = EINVAL;
+        return nullptr;
+      }
+      scudo::reportAlignmentNotPowerOfTwo(alignment);
+    }
+  }
+  return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+                                  alignment);
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
+                                                size_t size) {
+  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
+    if (!SCUDO_ALLOCATOR.canReturnNull())
+      scudo::reportInvalidPosixMemalignAlignment(alignment);
+    return EINVAL;
+  }
+  void *Ptr =
+      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+  if (UNLIKELY(!Ptr))
+    return ENOMEM;
+  *memptr = Ptr;
+  return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
+    if (SCUDO_ALLOCATOR.canReturnNull()) {
+      errno = ENOMEM;
+      return nullptr;
+    }
+    scudo::reportPvallocOverflow(size);
+  }
+  // pvalloc(0) should allocate one page.
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      size ? scudo::roundUpTo(size, PageSize) : PageSize,
+      scudo::Chunk::Origin::Memalign, PageSize));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
+  if (!ptr)
+    return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+  if (size == 0) {
+    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+    return nullptr;
+  }
+  return scudo::setErrnoOnNull(
+      SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
+}
+
+// Bionic wants a function named PREFIX_iterate and not PREFIX_malloc_iterate,
+// which is somewhat inconsistent with the rest; work around that.
+#if SCUDO_ANDROID && _BIONIC
+#define SCUDO_ITERATE iterate
+#else
+#define SCUDO_ITERATE malloc_iterate
+#endif
+
+INTERFACE WEAK int SCUDO_PREFIX(SCUDO_ITERATE)(
+    uintptr_t base, size_t size,
+    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
+  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
+  return 0;
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
+  SCUDO_ALLOCATOR.disable();
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
+  if (param == M_DECAY_TIME) {
+    // TODO(kostyak): set release_to_os_interval_ms accordingly.
+    return 1;
+  } else if (param == M_PURGE) {
+    SCUDO_ALLOCATOR.releaseToOS();
+    return 1;
+  }
+  return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
+                                                 size_t size) {
+  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
+    if (SCUDO_ALLOCATOR.canReturnNull()) {
+      errno = EINVAL;
+      return nullptr;
+    }
+    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
+  }
+  return scudo::setErrnoOnNull(
+      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(int, FILE *) {
+  errno = ENOTSUP;
+  return -1;
+}
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cc
new file mode 100644
index 0000000..f6e863d
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cc
@@ -0,0 +1,49 @@
+//===-- wrappers_c_bionic.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// This is only used when compiled as part of Bionic.
+#if SCUDO_ANDROID && _BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::AndroidConfig> Allocator;
+static scudo::Allocator<scudo::AndroidSvelteConfig> SvelteAllocator;
+
+extern "C" {
+
+// Regular MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// Svelte MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
+#define SCUDO_ALLOCATOR SvelteAllocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// The following is the only function that will end up initializing both
+// allocators, which will result in a slight increase in memory footprint.
+INTERFACE void __scudo_print_stats(void) {
+  Allocator.printStats();
+  SvelteAllocator.printStats();
+}
+
+} // extern "C"
+
+#endif // SCUDO_ANDROID && _BIONIC
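
Editor's note: wrappers_c.cc and wrappers_c_bionic.cc generate their exported functions by textually including wrappers_c.inc with different SCUDO_PREFIX/SCUDO_ALLOCATOR definitions, which is how the Bionic build ends up with both a scudo_* and a scudo_svelte_* family bound to two different allocators. The self-contained miniature below shows the shape of that mechanism only; it is not part of this import, every name in it is made up, and a function-like macro stands in for the real #include of wrappers_c.inc.

#include <cstdio>

struct MiniAllocator {
  const char *Name;
  void *allocate(unsigned long Size) {
    std::printf("%s: allocate(%lu)\n", Name, Size);
    return nullptr; // Placeholder; a real allocator would return memory.
  }
};

static MiniAllocator Default{"default"};
static MiniAllocator Svelte{"svelte"};

#define CONCAT_(A, B) A##B
#define CONCAT(A, B) CONCAT_(A, B)

// This block plays the role of wrappers_c.inc.
#define DEFINE_WRAPPERS(PREFIX, ALLOCATOR) \
  void *CONCAT(PREFIX, malloc)(unsigned long Size) { \
    return ALLOCATOR.allocate(Size); \
  }

// "Including" it twice generates scudo_malloc and scudo_svelte_malloc here.
DEFINE_WRAPPERS(scudo_, Default)
DEFINE_WRAPPERS(scudo_svelte_, Svelte)

int main() {
  scudo_malloc(16);        // Routed to the default allocator.
  scudo_svelte_malloc(16); // Routed to the svelte allocator.
  return 0;
}
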
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
new file mode 100644
index 0000000..d4370d5
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -0,0 +1,67 @@
+//===-- wrappers_c_checks.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKS_H_
+#define SCUDO_CHECKS_H_
+
+#include "common.h"
+
+#include <errno.h>
+
+#ifndef __has_builtin
+#define __has_builtin(X) 0
+#endif
+
+namespace scudo {
+
+// Common errno-setting logic shared by almost all Scudo C wrappers.
+INLINE void *setErrnoOnNull(void *Ptr) {
+  if (UNLIKELY(!Ptr))
+    errno = ENOMEM;
+  return Ptr;
+}
+
+// Checks return true on failure.
+
+// Checks aligned_alloc() parameters, verifies that the alignment is a power of
+// two and that the size is a multiple of alignment.
+INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+  return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+         !isAligned(Size, Alignment);
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+  return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+         !isAligned(Alignment, sizeof(void *));
+}
+
+// Returns true if calloc(Size, N) overflows on the Size*N calculation. Use a
+// builtin supported by recent clang & GCC if it exists, otherwise fall back
+// to a costly division.
+INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+#if __has_builtin(__builtin_umull_overflow)
+  return __builtin_umull_overflow(Size, N, Product);
+#else
+  *Product = Size * N;
+  if (!Size)
+    return false;
+  return (*Product / Size) != N;
+#endif
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of PageSize.
+INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+  return roundUpTo(Size, PageSize) < Size;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKS_H_
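
Editor's note: the two overflow checks above can be exercised on their own. A standalone sketch follows (not part of this import); it uses size_t instead of scudo::uptr, shows only the portable division fallback, and assumes roundUpTo() is the usual mask-based rounding for a power-of-two page size.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool callocOverflows(size_t Size, size_t N, size_t *Product) {
  // Portable fallback: detect wrap-around of Size * N with a division.
  *Product = Size * N;
  if (!Size)
    return false;
  return (*Product / Size) != N;
}

static bool pvallocOverflows(size_t Size, size_t PageSize) {
  // Rounding up past SIZE_MAX wraps around to a smaller value.
  const size_t Rounded = (Size + PageSize - 1) & ~(PageSize - 1);
  return Rounded < Size;
}

int main() {
  size_t Product;
  std::printf("%d\n", callocOverflows(SIZE_MAX, 2, &Product)); // 1: overflows.
  std::printf("%d\n", callocOverflows(4096, 16, &Product));    // 0: 65536 fits.
  std::printf("%d\n", pvallocOverflows(SIZE_MAX - 1, 4096));   // 1: overflows.
  return 0;
}
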
diff --git a/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cc b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cc
new file mode 100644
index 0000000..3ae1cdc
--- /dev/null
+++ b/src/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cc
@@ -0,0 +1,107 @@
+//===-- wrappers_cpp.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+
+#include <stdint.h>
+
+extern scudo::Allocator<scudo::Config> *AllocatorPtr;
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+INTERFACE WEAK void *operator new(size_t size) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size,
+                                  std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size,
+                                    std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+                                static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+                                static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
+                                  std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+                                static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
+                                    std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+                                static_cast<scudo::uptr>(align));
+}
+
+INTERFACE WEAK void operator delete(void *ptr)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+                                      std::nothrow_t const &) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+                                      std::align_val_t align) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
+                                    std::nothrow_t const &)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
+                                      std::nothrow_t const &) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size,
+                                    std::align_val_t align)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size,
+                                      std::align_val_t align) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
+                           static_cast<scudo::uptr>(align));
+}
+
+#endif // !SCUDO_ANDROID || !_BIONIC
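
Editor's note: the Origin argument threaded through every operator above is what lets the combined allocator enforce dealloc_type_mismatch, as exercised by the EXPECT_DEATH checks in wrappers_cpp_test.cc earlier in this change: memory allocated by new must come back through delete, and new[] through delete[]. The toy, self-contained illustration below shows only the shape of that bookkeeping; it is not Scudo's implementation, and every name in it is made up.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum class Origin : unsigned char { Malloc, New, NewArray };

// A header stored in front of each block records how it was allocated. It is
// padded to max_align_t so the user pointer stays suitably aligned.
struct alignas(alignof(std::max_align_t)) Header {
  Origin O;
};

static void *allocate(size_t Size, Origin O) {
  auto *H = static_cast<Header *>(std::malloc(sizeof(Header) + Size));
  if (!H)
    std::abort();
  H->O = O;
  return H + 1;
}

static void deallocate(void *Ptr, Origin O) {
  Header *H = static_cast<Header *>(Ptr) - 1;
  if (H->O != O)
    std::fprintf(stderr, "deallocation type mismatch\n");
  std::free(H);
}

int main() {
  void *P = allocate(16, Origin::New);      // As if from operator new.
  deallocate(P, Origin::NewArray);          // Wrong family: reports a mismatch.
  void *Q = allocate(16, Origin::NewArray); // As if from operator new[].
  deallocate(Q, Origin::NewArray);          // Matching family: silent.
  return 0;
}
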