storage: add storage-specific scudo config

Add a storage-specific copy of Scudo's allocator and size class
configuration headers and lower the manifest's min_heap to reduce
heap usage when storage is built with Scudo.

Bug: 231151082
Test: storage-unittest

Change-Id: I856b9a90b385fe009ed730c6c644eacd770ae1d2
diff --git a/manifest-scudo.json b/manifest-scudo.json
index c1465fd..4d698b6 100644
--- a/manifest-scudo.json
+++ b/manifest-scudo.json
@@ -1,6 +1,6 @@
 {
     "uuid": "cea8706d-6cb4-49f3-b994-29e0e478bd29",
-    "min_heap": 131072,
+    "min_heap": 40960,
     "min_stack": 16384,
     "start_ports": [
         {
diff --git a/scudo/config/allocator_config.h b/scudo/config/allocator_config.h
new file mode 100644
index 0000000..866cc9f
--- /dev/null
+++ b/scudo/config/allocator_config.h
@@ -0,0 +1,226 @@
+//===-- allocator_config.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_H_
+#define SCUDO_ALLOCATOR_CONFIG_H_
+
+#include "combined.h"
+#include "common.h"
+#include "flags.h"
+#include "primary32.h"
+#include "primary64.h"
+#include "secondary.h"
+#include "size_class_map.h"
+#include "tsd_exclusive.h"
+#include "tsd_shared.h"
+
+namespace scudo {
+
+// The combined allocator uses a structure as a template argument that
+// specifies the configuration options for the various subcomponents of the
+// allocator.
+//
+// struct ExampleConfig {
+//   // SizeClassMap to use with the Primary.
+//   using SizeClassMap = DefaultSizeClassMap;
+//   // Indicates possible support for Memory Tagging.
+//   static const bool MaySupportMemoryTagging = false;
+//   // Defines the Primary allocator to use.
+//   typedef SizeClassAllocator64<ExampleConfig> Primary;
+//   // Log2 of the size of a size class region, as used by the Primary.
+//   static const uptr PrimaryRegionSizeLog = 30U;
+//   // Log2 of the size of a block group, as used by the Primary. Each group
+//   // covers a range of memory addresses, and blocks in that range belong to
+//   // the same group. In general, a single region uses a 1 or 2 MB group
+//   // size, while multiple regions use a group size equal to the region
+//   // size, since the region size is usually smaller than 1 MB.
+//   // A smaller value gives finer-grained control of memory usage, but the
+//   // trade-off is that deallocation may take longer.
+//   static const uptr PrimaryGroupSizeLog = 20U;
+//   // Defines the type and scale of a compact pointer. A compact pointer can
+//   // be understood as the offset of a pointer within the region it belongs
+//   // to, in increments of a power-of-2 scale.
+//   // eg: Ptr = Base + (CompactPtr << Scale).
+//   typedef u32 PrimaryCompactPtrT;
+//   static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+//   // Indicates support for offsetting the start of a region by
+//   // a random number of pages. Only used with primary64.
+//   static const bool PrimaryEnableRandomOffset = true;
+//   // Call map for user memory with at least this size. Only used with
+//   // primary64.
+//   static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+//   // Defines the minimal & maximal release interval that can be set.
+//   static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+//   static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+//   // Defines the type of cache used by the Secondary. Some additional
+//   // configuration entries can be necessary depending on the Cache.
+//   typedef MapAllocatorNoCache SecondaryCache;
+//   // Thread-Specific Data Registry used, shared or exclusive.
+//   template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
+// };
+
+// Default configurations for various platforms.
+
+struct DefaultConfig {
+  using SizeClassMap = DefaultSizeClassMap;
+  static const bool MaySupportMemoryTagging = true;
+
+#if SCUDO_CAN_USE_PRIMARY64
+  typedef SizeClassAllocator64<DefaultConfig> Primary;
+  static const uptr PrimaryRegionSizeLog = 32U;
+  static const uptr PrimaryGroupSizeLog = 21U;
+  typedef uptr PrimaryCompactPtrT;
+  static const uptr PrimaryCompactPtrScale = 0;
+  static const bool PrimaryEnableRandomOffset = true;
+  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+#else
+  typedef SizeClassAllocator32<DefaultConfig> Primary;
+  static const uptr PrimaryRegionSizeLog = 19U;
+  static const uptr PrimaryGroupSizeLog = 19U;
+  typedef uptr PrimaryCompactPtrT;
+#endif
+  static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+  static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+
+  typedef MapAllocatorCache<DefaultConfig> SecondaryCache;
+  static const u32 SecondaryCacheEntriesArraySize = 32U;
+  static const u32 SecondaryCacheQuarantineSize = 0U;
+  static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
+  static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 19;
+  static const s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
+  static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = INT32_MAX;
+
+  template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+};
+struct AndroidConfig {
+  using SizeClassMap = AndroidSizeClassMap;
+  static const bool MaySupportMemoryTagging = true;
+
+#if SCUDO_CAN_USE_PRIMARY64
+  typedef SizeClassAllocator64<AndroidConfig> Primary;
+  static const uptr PrimaryRegionSizeLog = 28U;
+  typedef u32 PrimaryCompactPtrT;
+  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+  static const uptr PrimaryGroupSizeLog = 20U;
+  static const bool PrimaryEnableRandomOffset = true;
+  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+#else
+  typedef SizeClassAllocator32<AndroidConfig> Primary;
+  static const uptr PrimaryRegionSizeLog = 18U;
+  static const uptr PrimaryGroupSizeLog = 18U;
+  typedef uptr PrimaryCompactPtrT;
+#endif
+  static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
+  static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
+
+  typedef MapAllocatorCache<AndroidConfig> SecondaryCache;
+  static const u32 SecondaryCacheEntriesArraySize = 256U;
+  static const u32 SecondaryCacheQuarantineSize = 32U;
+  static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
+  static const uptr SecondaryCacheDefaultMaxEntrySize = 2UL << 20;
+  static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
+  static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 1000;
+
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
+};
+
+struct AndroidSvelteConfig {
+  using SizeClassMap = SvelteSizeClassMap;
+  static const bool MaySupportMemoryTagging = false;
+
+#if SCUDO_CAN_USE_PRIMARY64
+  typedef SizeClassAllocator64<AndroidSvelteConfig> Primary;
+  static const uptr PrimaryRegionSizeLog = 27U;
+  typedef u32 PrimaryCompactPtrT;
+  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+  static const uptr PrimaryGroupSizeLog = 18U;
+  static const bool PrimaryEnableRandomOffset = true;
+  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+#else
+  typedef SizeClassAllocator32<AndroidSvelteConfig> Primary;
+  static const uptr PrimaryRegionSizeLog = 16U;
+  static const uptr PrimaryGroupSizeLog = 16U;
+  typedef uptr PrimaryCompactPtrT;
+#endif
+  static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
+  static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
+
+  typedef MapAllocatorCache<AndroidSvelteConfig> SecondaryCache;
+  static const u32 SecondaryCacheEntriesArraySize = 16U;
+  static const u32 SecondaryCacheQuarantineSize = 32U;
+  static const u32 SecondaryCacheDefaultMaxEntriesCount = 4U;
+  static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 18;
+  static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
+  static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 0;
+
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
+};
+
+#if SCUDO_CAN_USE_PRIMARY64
+struct FuchsiaConfig {
+  using SizeClassMap = FuchsiaSizeClassMap;
+  static const bool MaySupportMemoryTagging = false;
+
+  typedef SizeClassAllocator64<FuchsiaConfig> Primary;
+// Support 39-bit VMA for riscv-64
+#if SCUDO_RISCV64
+  static const uptr PrimaryRegionSizeLog = 28U;
+  static const uptr PrimaryGroupSizeLog = 19U;
+#else
+  static const uptr PrimaryRegionSizeLog = 30U;
+  static const uptr PrimaryGroupSizeLog = 21U;
+#endif
+  typedef u32 PrimaryCompactPtrT;
+  static const bool PrimaryEnableRandomOffset = true;
+  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+  static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+  static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+
+  typedef MapAllocatorNoCache SecondaryCache;
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+};
+
+struct TrustyConfig {
+  using SizeClassMap = TrustySizeClassMap;
+  static const bool MaySupportMemoryTagging = true;
+
+  typedef SizeClassAllocator64<TrustyConfig> Primary;
+  // Some apps have 1 page of heap total so small regions are necessary.
+  static const uptr PrimaryRegionSizeLog = 28U;
+  static const uptr PrimaryGroupSizeLog = 20U;
+  typedef u32 PrimaryCompactPtrT;
+  static const bool PrimaryEnableRandomOffset = false;
+  // Trusty is extremely memory-constrained so minimally round up map calls.
+  static const uptr PrimaryMapSizeIncrement = 1UL << 12;
+  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+  static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+  static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+
+  typedef MapAllocatorNoCache SecondaryCache;
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.
+};
+#endif
+
+#if SCUDO_ANDROID
+typedef AndroidConfig Config;
+#elif SCUDO_FUCHSIA
+typedef FuchsiaConfig Config;
+#elif SCUDO_TRUSTY
+typedef TrustyConfig Config;
+#else
+typedef DefaultConfig Config;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_H_
diff --git a/scudo/config/size_class_map.h b/scudo/config/size_class_map.h
new file mode 100644
index 0000000..e0c0d23
--- /dev/null
+++ b/scudo/config/size_class_map.h
@@ -0,0 +1,385 @@
+//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SIZE_CLASS_MAP_H_
+#define SCUDO_SIZE_CLASS_MAP_H_
+
+#include "chunk.h"
+#include "common.h"
+#include "string_utils.h"
+
+namespace scudo {
+
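+// Maps Size to a bucket index: each power-of-two interval at or above
+// 2^ZeroLog is split into 2^LogBits equal sub-buckets, so sizes in
+// [2^ZeroLog, 2^(ZeroLog+1)) map to indices 0..2^LogBits-1, the next
+// interval to the following 2^LogBits indices, and so on.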
+inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
+  const uptr L = getMostSignificantSetBitIndex(Size);
+  const uptr LBits = (Size >> (L - LogBits)) - (1 << LogBits);
+  const uptr HBits = (L - ZeroLog) << LogBits;
+  return LBits + HBits;
+}
+
+template <typename Config> struct SizeClassMapBase {
+  static u16 getMaxCachedHint(uptr Size) {
+    DCHECK_NE(Size, 0);
+    u32 N;
+    // Force a 32-bit division if the template parameters allow for it.
+    if (Config::MaxBytesCachedLog > 31 || Config::MaxSizeLog > 31)
+      N = static_cast<u32>((1UL << Config::MaxBytesCachedLog) / Size);
+    else
+      N = (1U << Config::MaxBytesCachedLog) / static_cast<u32>(Size);
+
+    // Note that Config::MaxNumCachedHint is u16 so the result is guaranteed to
+    // fit in u16.
+    return static_cast<u16>(Max(1U, Min<u32>(Config::MaxNumCachedHint, N)));
+  }
+};
+
+// SizeClassMap maps allocation sizes into size classes and back, in an
+// efficient table-free manner.
+//
+// Class 0 is a special class that doesn't abide by the same rules as other
+// classes. The allocator uses it to hold batches.
+//
+// The other sizes are controlled by the template parameters:
+// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
+// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
+// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
+//               2^MidSizeLog bytes.
+// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
+//            eg. with NumBits==3 all size classes after 2^MidSizeLog look like
+//            0b1xx0..0 (where x is either 0 or 1).
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that can be cached per thread:
+// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
+// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
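+//
+// For example, with the DefaultSizeClassConfig defined below (MinSizeLog = 5,
+// MidSizeLog = 8, MaxSizeLog = 17, NumBits = 3, SizeDelta = 0), classes 1..8
+// are 32, 64, ..., 256 bytes in steps of 32, and past 256 each power-of-two
+// interval is split into 4 classes: 320, 384, 448, 512, then 640, 768, 896,
+// 1024, and so on up to 131072.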
+template <typename Config>
+class FixedSizeClassMap : public SizeClassMapBase<Config> {
+  typedef SizeClassMapBase<Config> Base;
+
+  static const uptr MinSize = 1UL << Config::MinSizeLog;
+  static const uptr MidSize = 1UL << Config::MidSizeLog;
+  static const uptr MidClass = MidSize / MinSize;
+  static const u8 S = Config::NumBits - 1;
+  static const uptr M = (1UL << S) - 1;
+
+public:
+  static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
+
+  static const uptr MaxSize = (1UL << Config::MaxSizeLog) + Config::SizeDelta;
+  static const uptr NumClasses =
+      MidClass + ((Config::MaxSizeLog - Config::MidSizeLog) << S) + 1;
+  static_assert(NumClasses <= 256, "");
+  static const uptr LargestClassId = NumClasses - 1;
+  static const uptr BatchClassId = 0;
+
+  static uptr getSizeByClassId(uptr ClassId) {
+    DCHECK_NE(ClassId, BatchClassId);
+    if (ClassId <= MidClass)
+      return (ClassId << Config::MinSizeLog) + Config::SizeDelta;
+    ClassId -= MidClass;
+    const uptr T = MidSize << (ClassId >> S);
+    return T + (T >> S) * (ClassId & M) + Config::SizeDelta;
+  }
+
+  static u8 getSizeLSBByClassId(uptr ClassId) {
+    return u8(getLeastSignificantSetBitIndex(getSizeByClassId(ClassId)));
+  }
+
+  static constexpr bool usesCompressedLSBFormat() { return false; }
+
+  static uptr getClassIdBySize(uptr Size) {
+    if (Size <= Config::SizeDelta + (1 << Config::MinSizeLog)) {
+      return 1;
+    }
+    Size -= Config::SizeDelta;
+    DCHECK_LE(Size, MaxSize);
+    if (Size <= MidSize)
+      return (Size + MinSize - 1) >> Config::MinSizeLog;
+    return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
+  }
+
+  static u16 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
+};
+
+template <typename Config>
+class TableSizeClassMap : public SizeClassMapBase<Config> {
+  typedef SizeClassMapBase<Config> Base;
+
+  static const u8 S = Config::NumBits - 1;
+  static const uptr M = (1UL << S) - 1;
+  static const uptr ClassesSize =
+      sizeof(Config::Classes) / sizeof(Config::Classes[0]);
+
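+  // Lookup table used for sizes above 2^MidSizeLog: entry i is the smallest
+  // class in Config::Classes that can hold every size whose
+  // scaledLog2(Size - 1, MidSizeLog, S) equals i.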
+  struct SizeTable {
+    constexpr SizeTable() {
+      uptr Pos = 1 << Config::MidSizeLog;
+      uptr Inc = 1 << (Config::MidSizeLog - S);
+      for (uptr i = 0; i != getTableSize(); ++i) {
+        Pos += Inc;
+        if ((Pos & (Pos - 1)) == 0)
+          Inc *= 2;
+        Tab[i] = computeClassId(Pos + Config::SizeDelta);
+      }
+    }
+
+    constexpr static u8 computeClassId(uptr Size) {
+      for (uptr i = 0; i != ClassesSize; ++i) {
+        if (Size <= Config::Classes[i])
+          return static_cast<u8>(i + 1);
+      }
+      return static_cast<u8>(-1);
+    }
+
+    constexpr static uptr getTableSize() {
+      return (Config::MaxSizeLog - Config::MidSizeLog) << S;
+    }
+
+    u8 Tab[getTableSize()] = {};
+  };
+
+  static constexpr SizeTable SzTable = {};
+
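+  // Precomputed least-significant-set-bit index of each class size. When all
+  // LSB indices fall within a window of four consecutive values and there are
+  // at most 32 classes, each entry is stored as a 2-bit offset from
+  // CompressedMin, packed into CompressedValue.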
+  struct LSBTable {
+    constexpr LSBTable() {
+      u8 Min = 255, Max = 0;
+      for (uptr I = 0; I != ClassesSize; ++I) {
+        for (u8 Bit = 0; Bit != 64; ++Bit) {
+          if (Config::Classes[I] & (1 << Bit)) {
+            Tab[I] = Bit;
+            if (Bit < Min)
+              Min = Bit;
+            if (Bit > Max)
+              Max = Bit;
+            break;
+          }
+        }
+      }
+
+      if (Max - Min > 3 || ClassesSize > 32)
+        return;
+
+      UseCompressedFormat = true;
+      CompressedMin = Min;
+      for (uptr I = 0; I != ClassesSize; ++I)
+        CompressedValue |= u64(Tab[I] - Min) << (I * 2);
+    }
+
+    u8 Tab[ClassesSize] = {};
+
+    bool UseCompressedFormat = false;
+    u8 CompressedMin = 0;
+    u64 CompressedValue = 0;
+  };
+
+  static constexpr LSBTable LTable = {};
+
+public:
+  static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
+
+  static const uptr NumClasses = ClassesSize + 1;
+  static_assert(NumClasses < 256, "");
+  static const uptr LargestClassId = NumClasses - 1;
+  static const uptr BatchClassId = 0;
+  static const uptr MaxSize = Config::Classes[LargestClassId - 1];
+
+  static uptr getSizeByClassId(uptr ClassId) {
+    return Config::Classes[ClassId - 1];
+  }
+
+  static u8 getSizeLSBByClassId(uptr ClassId) {
+    if (LTable.UseCompressedFormat)
+      return ((LTable.CompressedValue >> ((ClassId - 1) * 2)) & 3) +
+             LTable.CompressedMin;
+    else
+      return LTable.Tab[ClassId - 1];
+  }
+
+  static constexpr bool usesCompressedLSBFormat() {
+    return LTable.UseCompressedFormat;
+  }
+
+  static uptr getClassIdBySize(uptr Size) {
+    if (Size <= Config::Classes[0])
+      return 1;
+    Size -= Config::SizeDelta;
+    DCHECK_LE(Size, MaxSize);
+    if (Size <= (1 << Config::MidSizeLog))
+      return ((Size - 1) >> Config::MinSizeLog) + 1;
+    return SzTable.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
+  }
+
+  static u16 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
+};
+
+struct DefaultSizeClassConfig {
+  static const uptr NumBits = 3;
+  static const uptr MinSizeLog = 5;
+  static const uptr MidSizeLog = 8;
+  static const uptr MaxSizeLog = 17;
+  static const u16 MaxNumCachedHint = 14;
+  static const uptr MaxBytesCachedLog = 10;
+  static const uptr SizeDelta = 0;
+};
+
+typedef FixedSizeClassMap<DefaultSizeClassConfig> DefaultSizeClassMap;
+
+struct FuchsiaSizeClassConfig {
+  static const uptr NumBits = 3;
+  static const uptr MinSizeLog = 5;
+  static const uptr MidSizeLog = 8;
+  static const uptr MaxSizeLog = 17;
+  static const u16 MaxNumCachedHint = 12;
+  static const uptr MaxBytesCachedLog = 10;
+  static const uptr SizeDelta = Chunk::getHeaderSize();
+};
+
+typedef FixedSizeClassMap<FuchsiaSizeClassConfig> FuchsiaSizeClassMap;
+
+struct AndroidSizeClassConfig {
+#if SCUDO_WORDSIZE == 64U
+  static const uptr NumBits = 7;
+  static const uptr MinSizeLog = 4;
+  static const uptr MidSizeLog = 6;
+  static const uptr MaxSizeLog = 16;
+  static const u16 MaxNumCachedHint = 13;
+  static const uptr MaxBytesCachedLog = 13;
+
+  static constexpr u32 Classes[] = {
+      0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0,
+      0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450,
+      0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210,
+      0x02d90, 0x03790, 0x04010, 0x04810, 0x05a10, 0x07310, 0x08210, 0x10010,
+  };
+  static const uptr SizeDelta = 16;
+#else
+  static const uptr NumBits = 8;
+  static const uptr MinSizeLog = 4;
+  static const uptr MidSizeLog = 7;
+  static const uptr MaxSizeLog = 16;
+  static const u16 MaxNumCachedHint = 14;
+  static const uptr MaxBytesCachedLog = 13;
+
+  static constexpr u32 Classes[] = {
+      0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090,
+      0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130,
+      0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0,
+      0x00330, 0x00370, 0x003a0, 0x00400, 0x00430, 0x004a0, 0x00530, 0x00610,
+      0x00730, 0x00840, 0x00910, 0x009c0, 0x00a60, 0x00b10, 0x00ca0, 0x00e00,
+      0x00fb0, 0x01030, 0x01130, 0x011f0, 0x01490, 0x01650, 0x01930, 0x02010,
+      0x02190, 0x02490, 0x02850, 0x02d50, 0x03010, 0x03210, 0x03c90, 0x04090,
+      0x04510, 0x04810, 0x05c10, 0x06f10, 0x07310, 0x08010, 0x0c010, 0x10010,
+  };
+  static const uptr SizeDelta = 16;
+#endif
+};
+
+typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;
+
+#if SCUDO_WORDSIZE == 64U && defined(__clang__)
+static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
+#endif
+
+struct SvelteSizeClassConfig {
+#if SCUDO_WORDSIZE == 64U
+  static const uptr NumBits = 4;
+  static const uptr MinSizeLog = 4;
+  static const uptr MidSizeLog = 8;
+  static const uptr MaxSizeLog = 14;
+  static const u16 MaxNumCachedHint = 13;
+  static const uptr MaxBytesCachedLog = 10;
+  static const uptr SizeDelta = Chunk::getHeaderSize();
+#else
+  static const uptr NumBits = 4;
+  static const uptr MinSizeLog = 3;
+  static const uptr MidSizeLog = 7;
+  static const uptr MaxSizeLog = 14;
+  static const u16 MaxNumCachedHint = 14;
+  static const uptr MaxBytesCachedLog = 10;
+  static const uptr SizeDelta = Chunk::getHeaderSize();
+#endif
+};
+
+typedef FixedSizeClassMap<SvelteSizeClassConfig> SvelteSizeClassMap;
+
+/*
+ * Generated using the "compute_size_class_config_tool" from observed
+ * allocations in the storage app, then manually tweaked to remove one
+ * size class that contained only a single allocation and to lower the
+ * cache settings.
+ */
+struct TrustySizeClassConfig {
+  static const uptr NumBits = 6;
+  static const uptr MinSizeLog = 5;
+  static const uptr MidSizeLog = 5;
+  static const uptr MaxSizeLog = 15;
+  static const u16 MaxNumCachedHint = 12;
+  static const uptr MaxBytesCachedLog = 10;
+
+  static constexpr u32 Classes[] = {
+      0x00040, 0x00080, 0x00090, /*0x000b0,*/ 0x00150, 0x00490, 0x01090,
+  };
+  static const uptr SizeDelta = 16;
+};
+typedef TableSizeClassMap<TrustySizeClassConfig> TrustySizeClassMap;
+
+template <typename SCMap> inline void printMap() {
+  ScopedString Buffer;
+  uptr PrevS = 0;
+  uptr TotalCached = 0;
+  for (uptr I = 0; I < SCMap::NumClasses; I++) {
+    if (I == SCMap::BatchClassId)
+      continue;
+    const uptr S = SCMap::getSizeByClassId(I);
+    const uptr D = S - PrevS;
+    const uptr P = PrevS ? (D * 100 / PrevS) : 0;
+    const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
+    const uptr Cached = SCMap::getMaxCachedHint(S) * S;
+    Buffer.append(
+        "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %u %zu; id %zu\n", I,
+        S, D, P, L, SCMap::getMaxCachedHint(S), Cached,
+        SCMap::getClassIdBySize(S));
+    TotalCached += Cached;
+    PrevS = S;
+  }
+  Buffer.append("Total Cached: %zu\n", TotalCached);
+  Buffer.output();
+}
+
+template <typename SCMap> static UNUSED void validateMap() {
+  for (uptr C = 0; C < SCMap::NumClasses; C++) {
+    if (C == SCMap::BatchClassId)
+      continue;
+    const uptr S = SCMap::getSizeByClassId(C);
+    CHECK_NE(S, 0U);
+    CHECK_EQ(SCMap::getClassIdBySize(S), C);
+    if (C < SCMap::LargestClassId)
+      CHECK_EQ(SCMap::getClassIdBySize(S + 1), C + 1);
+    CHECK_EQ(SCMap::getClassIdBySize(S - 1), C);
+    if (C - 1 != SCMap::BatchClassId)
+      CHECK_GT(SCMap::getSizeByClassId(C), SCMap::getSizeByClassId(C - 1));
+  }
+  // Do not perform the loop if the maximum size is too large.
+  if (SCMap::MaxSize > (1 << 19))
+    return;
+  for (uptr S = 1; S <= SCMap::MaxSize; S++) {
+    const uptr C = SCMap::getClassIdBySize(S);
+    CHECK_LT(C, SCMap::NumClasses);
+    CHECK_GE(SCMap::getSizeByClassId(C), S);
+    if (C - 1 != SCMap::BatchClassId)
+      CHECK_LT(SCMap::getSizeByClassId(C - 1), S);
+  }
+}
+} // namespace scudo
+
+#endif // SCUDO_SIZE_CLASS_MAP_H_
diff --git a/scudo/rules.mk b/scudo/rules.mk
new file mode 100644
index 0000000..7e90de2
--- /dev/null
+++ b/scudo/rules.mk
@@ -0,0 +1,69 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_DIR := $(GET_LOCAL_DIR)
+
+MODULE := $(LOCAL_DIR)
+
+SCUDO_DIR := $(call FIND_EXTERNAL,scudo)
+
+MODULE_INCLUDES += \
+	$(SCUDO_DIR)/standalone \
+	$(SCUDO_DIR)/standalone/include \
+
+# These C/C++ flags are copied from the Android.bp build rules for Scudo.
+MODULE_CFLAGS += \
+	-fno-rtti \
+	-fno-stack-protector \
+	-fno-emulated-tls \
+	-Wno-unused-result \
+	-DSCUDO_MIN_ALIGNMENT_LOG=4 \
+
+MODULE_CPPFLAGS += \
+	-fno-exceptions \
+	-nostdinc++ \
+
+# scudo should be freestanding, but the rest of the app should not be.
+MODULE_COMPILEFLAGS += -ffreestanding
+
+# WARNING: while libstdc++-trusty continues to define `new` and `delete`,
+# it's possible that the symbols for those will be chosen over the ones
+# Scudo defines (also weak). None of the C++ sources below require any
+# STL headers but, if that changes, care will need to be taken to prevent
+# non-Scudo-defined `new` and `delete` from getting linked when STL headers
+# are desired.
+MODULE_SRCS += \
+	$(SCUDO_DIR)/standalone/checksum.cpp \
+	$(SCUDO_DIR)/standalone/common.cpp \
+	$(SCUDO_DIR)/standalone/crc32_hw.cpp \
+	$(SCUDO_DIR)/standalone/flags.cpp \
+	$(SCUDO_DIR)/standalone/flags_parser.cpp \
+	$(SCUDO_DIR)/standalone/mem_map.cpp \
+	$(SCUDO_DIR)/standalone/release.cpp \
+	$(SCUDO_DIR)/standalone/report.cpp \
+	$(SCUDO_DIR)/standalone/string_utils.cpp \
+	$(SCUDO_DIR)/standalone/trusty.cpp \
+	$(SCUDO_DIR)/standalone/rss_limit_checker.cpp \
+	$(LOCAL_DIR)/wrappers.cpp \
+
+# Add dependency on syscall-stubs
+MODULE_LIBRARY_DEPS += trusty/user/base/lib/syscall-stubs
+
+# Add src dependency on syscall header to ensure it is generated before we try
+# to build
+include trusty/user/base/lib/syscall-stubs/common-inc.mk
+MODULE_SRCDEPS += $(SYSCALL_H)
+
+include make/library.mk
diff --git a/scudo/wrappers.cpp b/scudo/wrappers.cpp
new file mode 100644
index 0000000..714657a
--- /dev/null
+++ b/scudo/wrappers.cpp
@@ -0,0 +1,5 @@
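+// Include the local, storage-tuned configuration headers first. They define
+// the same include guards as the upstream Scudo headers, so the upstream
+// copies that wrappers_c.cpp and wrappers_cpp.cpp would otherwise pull in
+// from the include path should be skipped in favor of this configuration.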
+#include "config/size_class_map.h"
+#include "config/allocator_config.h"
+
+#include "wrappers_c.cpp"
+#include "wrappers_cpp.cpp"